diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml
index 01271f9c..2d31d19c 100644
--- a/.github/workflows/build.yaml
+++ b/.github/workflows/build.yaml
@@ -36,7 +36,7 @@ jobs:
- ubuntu-latest #x64
- ubuntu-24.04-arm #arm64
- windows-latest #x64
- - macos-13 #x64
+ - macos-15-intel #x64
- macos-latest #arm64
runs-on: ${{ matrix.os }}
steps:
diff --git a/.github/workflows/release-image.yml b/.github/workflows/release-image.yml
index d11dbf0d..903951b8 100644
--- a/.github/workflows/release-image.yml
+++ b/.github/workflows/release-image.yml
@@ -14,6 +14,7 @@ env:
jobs:
publish-platform-images:
name: 'Publish: linux-${{ matrix.platform.tag }}'
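+    # Only publish from the canonical repository (skip forks).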
+ if: github.repository == 'containers/kubernetes-mcp-server'
strategy:
fail-fast: true
matrix:
@@ -47,6 +48,7 @@ jobs:
publish-manifest:
name: Publish Manifest
+ if: github.repository == 'containers/kubernetes-mcp-server'
runs-on: ubuntu-latest
needs: publish-platform-images
steps:
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index f048e7a8..c035b3a8 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -12,11 +12,11 @@ concurrency:
env:
GO_VERSION: 1.23
- NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
UV_PUBLISH_TOKEN: ${{ secrets.UV_PUBLISH_TOKEN }}
permissions:
contents: write
+ id-token: write # Required for npmjs OIDC
discussions: write
jobs:
@@ -39,6 +39,12 @@ jobs:
files: |
LICENSE
kubernetes-mcp-server-*
+ # Ensure npm 11.5.1 or later is installed (required for https://docs.npmjs.com/trusted-publishers)
+ - name: Setup node
+ uses: actions/setup-node@v6
+ with:
+ node-version: 24
+ registry-url: 'https://registry.npmjs.org'
- name: Publish npm
run:
make npm-publish
diff --git a/AGENTS.md b/AGENTS.md
index 854cfe5d..485ac1ad 100644
--- a/AGENTS.md
+++ b/AGENTS.md
@@ -11,12 +11,15 @@ This MCP server enables AI assistants (like Claude, Gemini, Cursor, and others)
- Go package layout follows the standard Go conventions:
- `cmd/kubernetes-mcp-server/` – main application entry point using Cobra CLI framework.
- `pkg/` – libraries grouped by domain.
+ - `api/` - API-related functionality, tool definitions, and toolset interfaces.
- `config/` – configuration management.
- `helm/` - Helm chart operations integration.
- `http/` - HTTP server and authorization middleware.
- `kubernetes/` - Kubernetes client management, authentication, and access control.
- `mcp/` - Model Context Protocol (MCP) server implementation with tool registration and STDIO/HTTP support.
- `output/` - output formatting and rendering.
+ - `toolsets/` - Toolset registration and management for MCP tools.
+ - `version/` - Version information management.
- `.github/` – GitHub-related configuration (Actions workflows, issue templates...).
- `docs/` – documentation files.
- `npm/` – Node packages that wrap the compiled binaries for distribution through npmjs.com.
@@ -30,6 +33,21 @@ Implement new functionality in the Go sources under `cmd/` and `pkg/`.
The JavaScript (`npm/`) and Python (`python/`) directories only wrap the compiled binary for distribution (npm and PyPI).
Most changes will not require touching them unless the version or packaging needs to be updated.
+### Adding new MCP tools
+
+The project uses a toolset-based architecture for organizing MCP tools:
+
+- **Tool definitions** are created in `pkg/api/` using the `ServerTool` struct.
+- **Toolsets** group related tools together (e.g., config tools, core Kubernetes tools, Helm tools).
+- **Registration** happens in `pkg/toolsets/` where toolsets are registered at initialization.
+- Each toolset lives in its own subdirectory under `pkg/toolsets/` (e.g., `pkg/toolsets/config/`, `pkg/toolsets/core/`, `pkg/toolsets/helm/`).
+
+When adding a new tool (see the sketch after this list):
+1. Define the tool handler function that implements the tool's logic.
+2. Create a `ServerTool` struct with the tool definition and handler.
+3. Add the tool to an appropriate toolset (or create a new toolset if needed).
+4. Register the toolset in `pkg/toolsets/` if it's a new toolset.
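+
+A minimal sketch of that flow, assuming hypothetical names (the exact `ServerTool` and toolset signatures live in `pkg/api` and `pkg/toolsets` — mirror an existing toolset such as `pkg/toolsets/helm/` for the real shapes):
+
+```go
+// Sketch only: the identifiers below are assumptions, not the actual pkg/api
+// surface. Check an existing toolset under pkg/toolsets/ for the real API.
+package mytoolset
+
+import (
+	"github.com/containers/kubernetes-mcp-server/pkg/api"
+	"github.com/containers/kubernetes-mcp-server/pkg/toolsets"
+)
+
+// Steps 2-3: a toolset groups related ServerTools.
+type MyToolset struct{}
+
+func (t *MyToolset) GetName() string { return "my-toolset" }
+
+// GetTools pairs each tool definition with its handler.
+func (t *MyToolset) GetTools() []api.ServerTool {
+	return []api.ServerTool{
+		{
+			Tool:    api.Tool{Name: "hello", Description: "Say hello"},
+			Handler: helloHandler, // step 1: the tool's logic
+		},
+	}
+}
+
+// Step 4: register the toolset once at package init time.
+func init() { toolsets.Register(&MyToolset{}) }
+
+func helloHandler(params api.ToolHandlerParams) (*api.ToolCallResult, error) {
+	return api.NewToolCallResult("hello from my toolset", nil), nil
+}
+```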
+
## Building
Use the provided Makefile targets:
@@ -105,6 +123,45 @@ make lint
The `lint` target downloads the specified `golangci-lint` version if it is not already present under `_output/tools/bin/`.
+## Additional Makefile targets
+
+Beyond the basic build, test, and lint targets, the Makefile provides additional utilities:
+
+**Local Development:**
+```bash
+# Setup a complete local development environment with Kind cluster
+make local-env-setup
+
+# Tear down the local Kind cluster
+make local-env-teardown
+
+# Show Keycloak status and connection info (for OIDC testing)
+make keycloak-status
+
+# Tail Keycloak logs
+make keycloak-logs
+
+# Install required development tools (like Kind) to ./_output/bin/
+make tools
+```
+
+**Distribution and Publishing:**
+```bash
+# Copy compiled binaries to each npm package
+make npm-copy-binaries
+
+# Publish the npm packages
+make npm-publish
+
+# Publish the Python packages
+make python-publish
+
+# Update README.md with the latest toolsets
+make update-readme-tools
+```
+
+Run `make help` to see all available targets with descriptions.
+
## Dependencies
When introducing new modules run `make tidy` so that `go.mod` and `go.sum` remain tidy.
diff --git a/CLAUDE.md b/CLAUDE.md
new file mode 120000
index 00000000..47dc3e3d
--- /dev/null
+++ b/CLAUDE.md
@@ -0,0 +1 @@
+AGENTS.md
\ No newline at end of file
diff --git a/Makefile b/Makefile
index 7f4e4271..04ff1ac0 100644
--- a/Makefile
+++ b/Makefile
@@ -16,7 +16,7 @@ LD_FLAGS = -s -w \
COMMON_BUILD_ARGS = -ldflags "$(LD_FLAGS)"
GOLANGCI_LINT = $(shell pwd)/_output/tools/bin/golangci-lint
-GOLANGCI_LINT_VERSION ?= v2.2.2
+GOLANGCI_LINT_VERSION ?= v2.5.0
# NPM version should not append the -dirty flag
NPM_VERSION ?= $(shell echo $(shell git describe --tags --always) | sed 's/^v//')
@@ -71,16 +71,14 @@ npm-publish: npm-copy-binaries ## Publish the npm packages
$(foreach os,$(OSES),$(foreach arch,$(ARCHS), \
DIRNAME="$(BINARY_NAME)-$(os)-$(arch)"; \
cd npm/$$DIRNAME; \
- echo '//registry.npmjs.org/:_authToken=$(NPM_TOKEN)' >> .npmrc; \
jq '.version = "$(NPM_VERSION)"' package.json > tmp.json && mv tmp.json package.json; \
- npm publish; \
+ npm publish --tag latest; \
cd ../..; \
))
cp README.md LICENSE ./npm/kubernetes-mcp-server/
- echo '//registry.npmjs.org/:_authToken=$(NPM_TOKEN)' >> ./npm/kubernetes-mcp-server/.npmrc
jq '.version = "$(NPM_VERSION)"' ./npm/kubernetes-mcp-server/package.json > tmp.json && mv tmp.json ./npm/kubernetes-mcp-server/package.json; \
jq '.optionalDependencies |= with_entries(.value = "$(NPM_VERSION)")' ./npm/kubernetes-mcp-server/package.json > tmp.json && mv tmp.json ./npm/kubernetes-mcp-server/package.json; \
- cd npm/kubernetes-mcp-server && npm publish
+ cd npm/kubernetes-mcp-server && npm publish --tag latest
.PHONY: python-publish
python-publish: ## Publish the python packages
@@ -115,3 +113,43 @@ lint: golangci-lint ## Lint the code
.PHONY: update-readme-tools
update-readme-tools: ## Update the README.md file with the latest toolsets
go run ./internal/tools/update-readme/main.go README.md
+
+##@ Tools
+
+.PHONY: tools
+tools: ## Install all required tools (kind) to ./_output/bin/
+ @echo "Checking and installing required tools to ./_output/bin/ ..."
+ @if [ -f _output/bin/kind ]; then echo "[OK] kind already installed"; else echo "Installing kind..."; $(MAKE) -s kind; fi
+ @echo "All tools ready!"
+
+##@ Local Development
+
+.PHONY: local-env-setup
+local-env-setup: ## Setup complete local development environment with Kind cluster
+ @echo "========================================="
+ @echo "Kubernetes MCP Server - Local Setup"
+ @echo "========================================="
+ $(MAKE) tools
+ $(MAKE) kind-create-cluster
+ $(MAKE) keycloak-install
+ $(MAKE) build
+ @echo ""
+ @echo "========================================="
+ @echo "Local environment ready!"
+ @echo "========================================="
+ @echo ""
+ @echo "Configuration file generated:"
+ @echo " _output/config.toml"
+ @echo ""
+ @echo "Run the MCP server with:"
+ @echo " ./$(BINARY_NAME) --port 8008 --config _output/config.toml"
+ @echo ""
+ @echo "Or run with MCP inspector:"
+ @echo " npx @modelcontextprotocol/inspector@latest \$$(pwd)/$(BINARY_NAME) --config _output/config.toml"
+
+.PHONY: local-env-teardown
+local-env-teardown: ## Tear down the local Kind cluster
+ $(MAKE) kind-delete-cluster
+
+# Include build configuration files
+-include build/*.mk
diff --git a/README.md b/README.md
index 600f4981..ee592bd5 100644
--- a/README.md
+++ b/README.md
@@ -54,6 +54,15 @@ If you're using the native binaries you don't need to have Node or Python instal
- Access to a Kubernetes cluster.
+
+### Claude Code
+
+Follow the [dedicated Claude Code getting started guide](docs/GETTING_STARTED_CLAUDE_CODE.md) in our [user documentation](docs/).
+
+For a secure production setup with dedicated ServiceAccount and read-only access, also review the [Kubernetes setup guide](docs/GETTING_STARTED_KUBERNETES.md).
+
### Claude Desktop
#### Using npx
@@ -235,6 +244,18 @@ In case multi-cluster support is enabled (default) and you have access to multip
- **projects_list** - List all the OpenShift projects in the current cluster
+- **nodes_log** - Get logs from a Kubernetes node (kubelet, kube-proxy, or other system logs). This accesses node logs through the Kubernetes API proxy to the kubelet
+ - `name` (`string`) **(required)** - Name of the node to get logs from
+  - `query` (`string`) **(required)** - Specifies the service(s) or files from which to return logs. Examples: "kubelet" to fetch kubelet logs, or a path starting with "/" to fetch a specific log file from the node (e.g., "/var/log/kubelet.log" or "/var/log/kube-proxy.log")
+ - `tailLines` (`integer`) - Number of lines to retrieve from the end of the logs (Optional, 0 means all logs)
+
+- **nodes_stats_summary** - Get detailed resource usage statistics from a Kubernetes node via the kubelet's Summary API. Provides comprehensive metrics including CPU, memory, filesystem, and network usage at the node, pod, and container levels. On systems with cgroup v2 and kernel 4.20+, also includes PSI (Pressure Stall Information) metrics that show resource pressure for CPU, memory, and I/O. See https://kubernetes.io/docs/reference/instrumentation/understand-psi-metrics/ for details on PSI metrics
+ - `name` (`string`) **(required)** - Name of the node to get stats from
+
+- **nodes_top** - List the resource consumption (CPU and memory) as recorded by the Kubernetes Metrics Server for the specified Kubernetes Nodes or all nodes in the cluster
+ - `label_selector` (`string`) - Kubernetes label selector (e.g. 'node-role.kubernetes.io/worker=') to filter nodes by label (Optional, only applicable when name is not provided)
+ - `name` (`string`) - Name of the Node to get the resource consumption from (Optional, all Nodes if not provided)
+
- **pods_list** - List all the Kubernetes pods in the current cluster from all namespaces
- `labelSelector` (`string`) - Optional Kubernetes label selector (e.g. 'app=myapp,env=prod' or 'app in (myapp,yourapp)'), use this option when you want to filter the pods by label
diff --git a/build/keycloak.mk b/build/keycloak.mk
new file mode 100644
index 00000000..86b63907
--- /dev/null
+++ b/build/keycloak.mk
@@ -0,0 +1,450 @@
+# Keycloak IdP for development and testing
+
+KEYCLOAK_NAMESPACE = keycloak
+KEYCLOAK_ADMIN_USER = admin
+KEYCLOAK_ADMIN_PASSWORD = admin
+
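+# keycloak-install: deploys Keycloak (dev mode) behind the nginx ingress with a
+# cert-manager TLS certificate, extracts the cert-manager CA and restarts the kind
+# API server so it trusts that CA for OIDC, then bootstraps the realm via
+# keycloak-setup-realm.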
+.PHONY: keycloak-install
+keycloak-install:
+ @echo "Installing Keycloak (dev mode using official image)..."
+ @kubectl apply -f dev/config/keycloak/deployment.yaml
+ @echo "Applying Keycloak ingress (cert-manager will create TLS certificate)..."
+ @kubectl apply -f dev/config/keycloak/ingress.yaml
+ @echo "Extracting cert-manager CA certificate..."
+ @mkdir -p _output/cert-manager-ca
+ @kubectl get secret selfsigned-ca-secret -n cert-manager -o jsonpath='{.data.ca\.crt}' | base64 -d > _output/cert-manager-ca/ca.crt
+ @echo "✅ cert-manager CA certificate extracted to _output/cert-manager-ca/ca.crt (bind-mounted to API server)"
+ @echo "Restarting Kubernetes API server to pick up new CA..."
+ @docker exec kubernetes-mcp-server-control-plane pkill -f kube-apiserver || \
+ podman exec kubernetes-mcp-server-control-plane pkill -f kube-apiserver
+ @echo "Waiting for API server to restart..."
+ @sleep 5
+ @echo "Waiting for API server to be ready..."
+ @for i in $$(seq 1 30); do \
+ if kubectl get --raw /healthz >/dev/null 2>&1; then \
+ echo "✅ Kubernetes API server updated with cert-manager CA"; \
+ break; \
+ fi; \
+ sleep 2; \
+ done
+ @echo "Waiting for Keycloak to be ready..."
+ @kubectl wait --for=condition=ready pod -l app=keycloak -n $(KEYCLOAK_NAMESPACE) --timeout=120s || true
+ @echo "Waiting for Keycloak HTTP endpoint to be available..."
+ @for i in $$(seq 1 30); do \
+ STATUS=$$(curl -sk -o /dev/null -w "%{http_code}" https://keycloak.127-0-0-1.sslip.io:8443/realms/master 2>/dev/null || echo "000"); \
+ if [ "$$STATUS" = "200" ]; then \
+ echo "✅ Keycloak HTTP endpoint ready"; \
+ break; \
+ fi; \
+ echo " Attempt $$i/30: Waiting for Keycloak (status: $$STATUS)..."; \
+ sleep 3; \
+ done
+ @echo ""
+ @echo "Setting up OpenShift realm..."
+ @$(MAKE) -s keycloak-setup-realm
+ @echo ""
+ @echo "✅ Keycloak installed and configured!"
+ @echo "Access at: https://keycloak.127-0-0-1.sslip.io:8443"
+
+.PHONY: keycloak-uninstall
+keycloak-uninstall:
+ @kubectl delete -f dev/config/keycloak/deployment.yaml 2>/dev/null || true
+
+##@ Keycloak
+
+.PHONY: keycloak-status
+keycloak-status: ## Show Keycloak status and connection info
+ @if kubectl get svc -n $(KEYCLOAK_NAMESPACE) keycloak >/dev/null 2>&1; then \
+ echo "========================================"; \
+ echo "Keycloak Status"; \
+ echo "========================================"; \
+ echo ""; \
+ echo "Status: Installed"; \
+ echo ""; \
+ echo "Admin Console:"; \
+ echo " URL: https://keycloak.127-0-0-1.sslip.io:8443"; \
+ echo " Username: $(KEYCLOAK_ADMIN_USER)"; \
+ echo " Password: $(KEYCLOAK_ADMIN_PASSWORD)"; \
+ echo ""; \
+ echo "OIDC Endpoints (openshift realm):"; \
+ echo " Discovery: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift/.well-known/openid-configuration"; \
+ echo " Token: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift/protocol/openid-connect/token"; \
+ echo " Authorize: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift/protocol/openid-connect/auth"; \
+ echo " UserInfo: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift/protocol/openid-connect/userinfo"; \
+ echo " JWKS: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift/protocol/openid-connect/certs"; \
+ echo ""; \
+ echo "========================================"; \
+ else \
+ echo "Keycloak is not installed. Run: make keycloak-install"; \
+ fi
+
+.PHONY: keycloak-logs
+keycloak-logs: ## Tail Keycloak logs
+ @kubectl logs -n $(KEYCLOAK_NAMESPACE) -l app=keycloak -f --tail=100
+
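+# Idempotent realm bootstrap: every create request treats HTTP 201 (created) and
+# 409 (already exists) as success, so this target is safe to re-run.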
+.PHONY: keycloak-setup-realm
+keycloak-setup-realm:
+ @echo "========================================="
+ @echo "Setting up OpenShift Realm for Token Exchange"
+ @echo "========================================="
+ @echo "Using Keycloak at https://keycloak.127-0-0-1.sslip.io:8443"
+ @echo ""
+ @echo "Getting admin access token..."
+ @RESPONSE=$$(curl -sk -X POST "https://keycloak.127-0-0-1.sslip.io:8443/realms/master/protocol/openid-connect/token" \
+ -H "Content-Type: application/x-www-form-urlencoded" \
+ -d "username=$(KEYCLOAK_ADMIN_USER)" \
+ -d "password=$(KEYCLOAK_ADMIN_PASSWORD)" \
+ -d "grant_type=password" \
+ -d "client_id=admin-cli"); \
+ TOKEN=$$(echo "$$RESPONSE" | jq -r '.access_token // empty' 2>/dev/null); \
+ if [ -z "$$TOKEN" ] || [ "$$TOKEN" = "null" ]; then \
+ echo "❌ Failed to get access token"; \
+ echo "Response was: $$RESPONSE" | head -c 200; \
+ echo ""; \
+ echo "Check if:"; \
+ echo " - Keycloak is running (make keycloak-install)"; \
+ echo " - Keycloak is accessible at https://keycloak.127-0-0-1.sslip.io:8443"; \
+ echo " - Admin credentials are correct: $(KEYCLOAK_ADMIN_USER)/$(KEYCLOAK_ADMIN_PASSWORD)"; \
+ exit 1; \
+ fi; \
+ echo "✅ Successfully obtained access token"; \
+ echo ""; \
+ echo "Creating OpenShift realm..."; \
+ REALM_RESPONSE=$$(curl -sk -w "%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms" \
+ -H "Authorization: Bearer $$TOKEN" \
+ -H "Content-Type: application/json" \
+ -d @dev/config/keycloak/realm/realm-create.json); \
+ REALM_CODE=$$(echo "$$REALM_RESPONSE" | tail -c 4); \
+ if [ "$$REALM_CODE" = "201" ] || [ "$$REALM_CODE" = "409" ]; then \
+ if [ "$$REALM_CODE" = "201" ]; then echo "✅ OpenShift realm created"; \
+ else echo "✅ OpenShift realm already exists"; fi; \
+ else \
+ echo "❌ Failed to create OpenShift realm (HTTP $$REALM_CODE)"; \
+ exit 1; \
+ fi; \
+ echo ""; \
+ echo "Configuring realm events..."; \
+ EVENT_CONFIG_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X PUT "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift" \
+ -H "Authorization: Bearer $$TOKEN" \
+ -H "Content-Type: application/json" \
+ -d @dev/config/keycloak/realm/realm-events-config.json); \
+ EVENT_CONFIG_CODE=$$(echo "$$EVENT_CONFIG_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
+ if [ "$$EVENT_CONFIG_CODE" = "204" ]; then \
+ echo "✅ User and admin event logging enabled"; \
+ else \
+ echo "⚠️ Could not configure event logging (HTTP $$EVENT_CONFIG_CODE)"; \
+ fi; \
+ echo ""; \
+ echo "Creating mcp:openshift client scope..."; \
+ SCOPE_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes" \
+ -H "Authorization: Bearer $$TOKEN" \
+ -H "Content-Type: application/json" \
+ -d @dev/config/keycloak/client-scopes/mcp-openshift.json); \
+ SCOPE_CODE=$$(echo "$$SCOPE_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
+ if [ "$$SCOPE_CODE" = "201" ] || [ "$$SCOPE_CODE" = "409" ]; then \
+ if [ "$$SCOPE_CODE" = "201" ]; then echo "✅ mcp:openshift client scope created"; \
+ else echo "✅ mcp:openshift client scope already exists"; fi; \
+ else \
+ echo "❌ Failed to create mcp:openshift scope (HTTP $$SCOPE_CODE)"; \
+ exit 1; \
+ fi; \
+ echo ""; \
+ echo "Adding audience mapper to mcp:openshift scope..."; \
+ SCOPES_LIST=$$(curl -sk -X GET "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes" \
+ -H "Authorization: Bearer $$TOKEN" \
+ -H "Accept: application/json"); \
+ SCOPE_ID=$$(echo "$$SCOPES_LIST" | jq -r '.[] | select(.name == "mcp:openshift") | .id // empty' 2>/dev/null); \
+ if [ -z "$$SCOPE_ID" ]; then \
+ echo "❌ Failed to find mcp:openshift scope"; \
+ exit 1; \
+ fi; \
+ MAPPER_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes/$$SCOPE_ID/protocol-mappers/models" \
+ -H "Authorization: Bearer $$TOKEN" \
+ -H "Content-Type: application/json" \
+ -d @dev/config/keycloak/mappers/openshift-audience.json); \
+ MAPPER_CODE=$$(echo "$$MAPPER_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
+ if [ "$$MAPPER_CODE" = "201" ] || [ "$$MAPPER_CODE" = "409" ]; then \
+ if [ "$$MAPPER_CODE" = "201" ]; then echo "✅ Audience mapper added"; \
+ else echo "✅ Audience mapper already exists"; fi; \
+ else \
+ echo "❌ Failed to create audience mapper (HTTP $$MAPPER_CODE)"; \
+ exit 1; \
+ fi; \
+ echo ""; \
+ echo "Creating groups client scope..."; \
+ GROUPS_SCOPE_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes" \
+ -H "Authorization: Bearer $$TOKEN" \
+ -H "Content-Type: application/json" \
+ -d @dev/config/keycloak/client-scopes/groups.json); \
+ GROUPS_SCOPE_CODE=$$(echo "$$GROUPS_SCOPE_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
+ if [ "$$GROUPS_SCOPE_CODE" = "201" ] || [ "$$GROUPS_SCOPE_CODE" = "409" ]; then \
+ if [ "$$GROUPS_SCOPE_CODE" = "201" ]; then echo "✅ groups client scope created"; \
+ else echo "✅ groups client scope already exists"; fi; \
+ else \
+ echo "❌ Failed to create groups scope (HTTP $$GROUPS_SCOPE_CODE)"; \
+ exit 1; \
+ fi; \
+ echo ""; \
+ echo "Adding group membership mapper to groups scope..."; \
+ SCOPES_LIST=$$(curl -sk -X GET "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes" \
+ -H "Authorization: Bearer $$TOKEN" \
+ -H "Accept: application/json"); \
+ GROUPS_SCOPE_ID=$$(echo "$$SCOPES_LIST" | jq -r '.[] | select(.name == "groups") | .id // empty' 2>/dev/null); \
+ if [ -z "$$GROUPS_SCOPE_ID" ]; then \
+ echo "❌ Failed to find groups scope"; \
+ exit 1; \
+ fi; \
+ GROUPS_MAPPER_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes/$$GROUPS_SCOPE_ID/protocol-mappers/models" \
+ -H "Authorization: Bearer $$TOKEN" \
+ -H "Content-Type: application/json" \
+ -d @dev/config/keycloak/mappers/groups-membership.json); \
+ GROUPS_MAPPER_CODE=$$(echo "$$GROUPS_MAPPER_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
+ if [ "$$GROUPS_MAPPER_CODE" = "201" ] || [ "$$GROUPS_MAPPER_CODE" = "409" ]; then \
+ if [ "$$GROUPS_MAPPER_CODE" = "201" ]; then echo "✅ Group membership mapper added"; \
+ else echo "✅ Group membership mapper already exists"; fi; \
+ else \
+ echo "❌ Failed to create group mapper (HTTP $$GROUPS_MAPPER_CODE)"; \
+ exit 1; \
+ fi; \
+ echo ""; \
+ echo "Creating mcp-server client scope..."; \
+ MCP_SERVER_SCOPE_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes" \
+ -H "Authorization: Bearer $$TOKEN" \
+ -H "Content-Type: application/json" \
+ -d @dev/config/keycloak/client-scopes/mcp-server.json); \
+ MCP_SERVER_SCOPE_CODE=$$(echo "$$MCP_SERVER_SCOPE_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
+ if [ "$$MCP_SERVER_SCOPE_CODE" = "201" ] || [ "$$MCP_SERVER_SCOPE_CODE" = "409" ]; then \
+ if [ "$$MCP_SERVER_SCOPE_CODE" = "201" ]; then echo "✅ mcp-server client scope created"; \
+ else echo "✅ mcp-server client scope already exists"; fi; \
+ else \
+ echo "❌ Failed to create mcp-server scope (HTTP $$MCP_SERVER_SCOPE_CODE)"; \
+ exit 1; \
+ fi; \
+ echo ""; \
+ echo "Adding audience mapper to mcp-server scope..."; \
+ SCOPES_LIST=$$(curl -sk -X GET "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes" \
+ -H "Authorization: Bearer $$TOKEN" \
+ -H "Accept: application/json"); \
+ MCP_SERVER_SCOPE_ID=$$(echo "$$SCOPES_LIST" | jq -r '.[] | select(.name == "mcp-server") | .id // empty' 2>/dev/null); \
+ if [ -z "$$MCP_SERVER_SCOPE_ID" ]; then \
+ echo "❌ Failed to find mcp-server scope"; \
+ exit 1; \
+ fi; \
+ MCP_SERVER_MAPPER_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes/$$MCP_SERVER_SCOPE_ID/protocol-mappers/models" \
+ -H "Authorization: Bearer $$TOKEN" \
+ -H "Content-Type: application/json" \
+ -d @dev/config/keycloak/mappers/mcp-server-audience.json); \
+ MCP_SERVER_MAPPER_CODE=$$(echo "$$MCP_SERVER_MAPPER_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
+ if [ "$$MCP_SERVER_MAPPER_CODE" = "201" ] || [ "$$MCP_SERVER_MAPPER_CODE" = "409" ]; then \
+ if [ "$$MCP_SERVER_MAPPER_CODE" = "201" ]; then echo "✅ mcp-server audience mapper added"; \
+ else echo "✅ mcp-server audience mapper already exists"; fi; \
+ else \
+ echo "❌ Failed to create mcp-server audience mapper (HTTP $$MCP_SERVER_MAPPER_CODE)"; \
+ exit 1; \
+ fi; \
+ echo ""; \
+ echo "Creating openshift service client..."; \
+ OPENSHIFT_CLIENT_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients" \
+ -H "Authorization: Bearer $$TOKEN" \
+ -H "Content-Type: application/json" \
+ -d @dev/config/keycloak/clients/openshift.json); \
+ OPENSHIFT_CLIENT_CODE=$$(echo "$$OPENSHIFT_CLIENT_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
+ if [ "$$OPENSHIFT_CLIENT_CODE" = "201" ] || [ "$$OPENSHIFT_CLIENT_CODE" = "409" ]; then \
+ if [ "$$OPENSHIFT_CLIENT_CODE" = "201" ]; then echo "✅ openshift client created"; \
+ else echo "✅ openshift client already exists"; fi; \
+ else \
+ echo "❌ Failed to create openshift client (HTTP $$OPENSHIFT_CLIENT_CODE)"; \
+ exit 1; \
+ fi; \
+ echo ""; \
+ echo "Adding username mapper to openshift client..."; \
+ OPENSHIFT_CLIENTS_LIST=$$(curl -sk -X GET "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients" \
+ -H "Authorization: Bearer $$TOKEN" \
+ -H "Accept: application/json"); \
+ OPENSHIFT_CLIENT_ID=$$(echo "$$OPENSHIFT_CLIENTS_LIST" | jq -r '.[] | select(.clientId == "openshift") | .id // empty' 2>/dev/null); \
+ OPENSHIFT_USERNAME_MAPPER_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients/$$OPENSHIFT_CLIENT_ID/protocol-mappers/models" \
+ -H "Authorization: Bearer $$TOKEN" \
+ -H "Content-Type: application/json" \
+ -d @dev/config/keycloak/mappers/username.json); \
+ OPENSHIFT_USERNAME_MAPPER_CODE=$$(echo "$$OPENSHIFT_USERNAME_MAPPER_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
+ if [ "$$OPENSHIFT_USERNAME_MAPPER_CODE" = "201" ] || [ "$$OPENSHIFT_USERNAME_MAPPER_CODE" = "409" ]; then \
+ if [ "$$OPENSHIFT_USERNAME_MAPPER_CODE" = "201" ]; then echo "✅ Username mapper added to openshift client"; \
+ else echo "✅ Username mapper already exists on openshift client"; fi; \
+ else \
+ echo "❌ Failed to create username mapper (HTTP $$OPENSHIFT_USERNAME_MAPPER_CODE)"; \
+ exit 1; \
+ fi; \
+ echo ""; \
+ echo "Creating mcp-client public client..."; \
+ MCP_PUBLIC_CLIENT_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients" \
+ -H "Authorization: Bearer $$TOKEN" \
+ -H "Content-Type: application/json" \
+ -d @dev/config/keycloak/clients/mcp-client.json); \
+ MCP_PUBLIC_CLIENT_CODE=$$(echo "$$MCP_PUBLIC_CLIENT_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
+ if [ "$$MCP_PUBLIC_CLIENT_CODE" = "201" ] || [ "$$MCP_PUBLIC_CLIENT_CODE" = "409" ]; then \
+ if [ "$$MCP_PUBLIC_CLIENT_CODE" = "201" ]; then echo "✅ mcp-client public client created"; \
+ else echo "✅ mcp-client public client already exists"; fi; \
+ else \
+ echo "❌ Failed to create mcp-client public client (HTTP $$MCP_PUBLIC_CLIENT_CODE)"; \
+ exit 1; \
+ fi; \
+ echo ""; \
+ echo "Adding username mapper to mcp-client..."; \
+ MCP_PUBLIC_CLIENTS_LIST=$$(curl -sk -X GET "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients" \
+ -H "Authorization: Bearer $$TOKEN" \
+ -H "Accept: application/json"); \
+ MCP_PUBLIC_CLIENT_ID=$$(echo "$$MCP_PUBLIC_CLIENTS_LIST" | jq -r '.[] | select(.clientId == "mcp-client") | .id // empty' 2>/dev/null); \
+ MCP_PUBLIC_USERNAME_MAPPER_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients/$$MCP_PUBLIC_CLIENT_ID/protocol-mappers/models" \
+ -H "Authorization: Bearer $$TOKEN" \
+ -H "Content-Type: application/json" \
+ -d @dev/config/keycloak/mappers/username.json); \
+ MCP_PUBLIC_USERNAME_MAPPER_CODE=$$(echo "$$MCP_PUBLIC_USERNAME_MAPPER_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
+ if [ "$$MCP_PUBLIC_USERNAME_MAPPER_CODE" = "201" ] || [ "$$MCP_PUBLIC_USERNAME_MAPPER_CODE" = "409" ]; then \
+ if [ "$$MCP_PUBLIC_USERNAME_MAPPER_CODE" = "201" ]; then echo "✅ Username mapper added to mcp-client"; \
+ else echo "✅ Username mapper already exists on mcp-client"; fi; \
+ else \
+ echo "❌ Failed to create username mapper (HTTP $$MCP_PUBLIC_USERNAME_MAPPER_CODE)"; \
+ exit 1; \
+ fi; \
+ echo ""; \
+ echo "Creating mcp-server client with token exchange..."; \
+ MCP_CLIENT_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients" \
+ -H "Authorization: Bearer $$TOKEN" \
+ -H "Content-Type: application/json" \
+ -d @dev/config/keycloak/clients/mcp-server.json); \
+ MCP_CLIENT_CODE=$$(echo "$$MCP_CLIENT_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
+ if [ "$$MCP_CLIENT_CODE" = "201" ] || [ "$$MCP_CLIENT_CODE" = "409" ]; then \
+ if [ "$$MCP_CLIENT_CODE" = "201" ]; then echo "✅ mcp-server client created"; \
+ else echo "✅ mcp-server client already exists"; fi; \
+ else \
+ echo "❌ Failed to create mcp-server client (HTTP $$MCP_CLIENT_CODE)"; \
+ exit 1; \
+ fi; \
+ echo ""; \
+ echo "Enabling standard token exchange for mcp-server..."; \
+ CLIENTS_LIST=$$(curl -sk -X GET "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients" \
+ -H "Authorization: Bearer $$TOKEN" \
+ -H "Accept: application/json"); \
+ MCP_CLIENT_ID=$$(echo "$$CLIENTS_LIST" | jq -r '.[] | select(.clientId == "mcp-server") | .id // empty' 2>/dev/null); \
+ if [ -z "$$MCP_CLIENT_ID" ]; then \
+ echo "❌ Failed to find mcp-server client"; \
+ exit 1; \
+ fi; \
+ UPDATE_CLIENT_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X PUT "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients/$$MCP_CLIENT_ID" \
+ -H "Authorization: Bearer $$TOKEN" \
+ -H "Content-Type: application/json" \
+ -d @dev/config/keycloak/clients/mcp-server-update.json); \
+ UPDATE_CLIENT_CODE=$$(echo "$$UPDATE_CLIENT_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
+ if [ "$$UPDATE_CLIENT_CODE" = "204" ]; then \
+ echo "✅ Standard token exchange enabled for mcp-server client"; \
+ else \
+ echo "⚠️ Could not enable token exchange (HTTP $$UPDATE_CLIENT_CODE)"; \
+ fi; \
+ echo ""; \
+ echo "Getting mcp-server client secret..."; \
+ SECRET_RESPONSE=$$(curl -sk -X GET "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients/$$MCP_CLIENT_ID/client-secret" \
+ -H "Authorization: Bearer $$TOKEN" \
+ -H "Accept: application/json"); \
+ CLIENT_SECRET=$$(echo "$$SECRET_RESPONSE" | jq -r '.value // empty' 2>/dev/null); \
+ if [ -z "$$CLIENT_SECRET" ]; then \
+ echo "❌ Failed to get client secret"; \
+ else \
+ echo "✅ Client secret retrieved"; \
+ fi; \
+ echo ""; \
+ echo "Adding username mapper to mcp-server client..."; \
+ MCP_USERNAME_MAPPER_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients/$$MCP_CLIENT_ID/protocol-mappers/models" \
+ -H "Authorization: Bearer $$TOKEN" \
+ -H "Content-Type: application/json" \
+ -d @dev/config/keycloak/mappers/username.json); \
+ MCP_USERNAME_MAPPER_CODE=$$(echo "$$MCP_USERNAME_MAPPER_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
+ if [ "$$MCP_USERNAME_MAPPER_CODE" = "201" ] || [ "$$MCP_USERNAME_MAPPER_CODE" = "409" ]; then \
+ if [ "$$MCP_USERNAME_MAPPER_CODE" = "201" ]; then echo "✅ Username mapper added to mcp-server client"; \
+ else echo "✅ Username mapper already exists on mcp-server client"; fi; \
+ else \
+ echo "❌ Failed to create username mapper (HTTP $$MCP_USERNAME_MAPPER_CODE)"; \
+ exit 1; \
+ fi; \
+ echo ""; \
+ echo "Creating test user mcp/mcp..."; \
+ USER_RESPONSE=$$(curl -sk -w "%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/users" \
+ -H "Authorization: Bearer $$TOKEN" \
+ -H "Content-Type: application/json" \
+ -d @dev/config/keycloak/users/mcp.json); \
+ USER_CODE=$$(echo "$$USER_RESPONSE" | tail -c 4); \
+ if [ "$$USER_CODE" = "201" ] || [ "$$USER_CODE" = "409" ]; then \
+ if [ "$$USER_CODE" = "201" ]; then echo "✅ mcp user created"; \
+ else echo "✅ mcp user already exists"; fi; \
+ else \
+ echo "❌ Failed to create mcp user (HTTP $$USER_CODE)"; \
+ exit 1; \
+ fi; \
+ echo ""; \
+ echo "Setting up RBAC for mcp user..."; \
+ kubectl apply -f dev/config/keycloak/rbac.yaml; \
+ echo "✅ RBAC binding created for mcp user"; \
+ echo ""; \
+ echo "🎉 OpenShift realm setup complete!"; \
+ echo ""; \
+ echo "========================================"; \
+ echo "Configuration Summary"; \
+ echo "========================================"; \
+ echo "Realm: openshift"; \
+ echo "Authorization URL: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift"; \
+ echo "Issuer URL (for config.toml): https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift"; \
+ echo ""; \
+ echo "Test User:"; \
+ echo " Username: mcp"; \
+ echo " Password: mcp"; \
+ echo " Email: mcp@example.com"; \
+ echo " RBAC: cluster-admin (full cluster access)"; \
+ echo ""; \
+ echo "Clients:"; \
+ echo " mcp-client (public, for browser-based auth)"; \
+ echo " Client ID: mcp-client"; \
+ echo " Optional Scopes: mcp-server"; \
+ echo " mcp-server (confidential, token exchange enabled)"; \
+ echo " Client ID: mcp-server"; \
+ echo " Client Secret: $$CLIENT_SECRET"; \
+ echo " openshift (service account)"; \
+ echo " Client ID: openshift"; \
+ echo ""; \
+ echo "Client Scopes:"; \
+ echo " mcp-server (default) - Audience: mcp-server"; \
+ echo " mcp:openshift (optional) - Audience: openshift"; \
+ echo " groups (default) - Group membership mapper"; \
+ echo ""; \
+ echo "TOML Configuration (config.toml):"; \
+ echo " require_oauth = true"; \
+ echo " oauth_audience = \"mcp-server\""; \
+ echo " oauth_scopes = [\"openid\", \"mcp-server\"]"; \
+ echo " validate_token = false"; \
+ echo " authorization_url = \"https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift\""; \
+ echo " sts_client_id = \"mcp-server\""; \
+ echo " sts_client_secret = \"$$CLIENT_SECRET\""; \
+ echo " sts_audience = \"openshift\""; \
+ echo " sts_scopes = [\"mcp:openshift\"]"; \
+ echo " certificate_authority = \"_output/cert-manager-ca/ca.crt\""; \
+ echo "========================================"; \
+ echo ""; \
+ echo "Note: The Kubernetes API server is configured with:"; \
+ echo " --oidc-issuer-url=https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift"; \
+ echo ""; \
+ echo "Important: The cert-manager CA certificate was extracted to:"; \
+ echo " _output/cert-manager-ca/ca.crt"; \
+ echo ""; \
+ echo "Writing configuration to _output/config.toml..."; \
+ mkdir -p _output; \
+ printf '%s\n' \
+ 'require_oauth = true' \
+ 'oauth_audience = "mcp-server"' \
+ 'oauth_scopes = ["openid", "mcp-server"]' \
+ 'validate_token = false' \
+ 'authorization_url = "https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift"' \
+ 'sts_client_id = "mcp-server"' \
+ "sts_client_secret = \"$$CLIENT_SECRET\"" \
+ 'sts_audience = "openshift"' \
+ 'sts_scopes = ["mcp:openshift"]' \
+ 'certificate_authority = "_output/cert-manager-ca/ca.crt"' \
+ > _output/config.toml; \
+ echo "✅ Configuration written to _output/config.toml"
diff --git a/build/kind.mk b/build/kind.mk
new file mode 100644
index 00000000..fe83f1ab
--- /dev/null
+++ b/build/kind.mk
@@ -0,0 +1,61 @@
+# Kind cluster management
+
+KIND_CLUSTER_NAME ?= kubernetes-mcp-server
+
+# Detect container engine (docker or podman)
+CONTAINER_ENGINE ?= $(shell command -v docker 2>/dev/null || command -v podman 2>/dev/null)
+
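+# The kind node bind-mounts _output/cert-manager-ca/ca.crt at cluster creation
+# (see dev/config/kind/cluster.yaml), before cert-manager exists, so a placeholder
+# certificate must be generated first.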
+.PHONY: kind-create-certs
+kind-create-certs:
+ @if [ ! -f _output/cert-manager-ca/ca.crt ]; then \
+ echo "Creating placeholder CA certificate for bind mount..."; \
+ ./hack/generate-placeholder-ca.sh; \
+ else \
+ echo "✅ Placeholder CA already exists"; \
+ fi
+
+.PHONY: kind-create-cluster
+kind-create-cluster: kind kind-create-certs
+ @# Set KIND provider for podman on Linux
+ @if [ "$(shell uname -s)" != "Darwin" ] && echo "$(CONTAINER_ENGINE)" | grep -q "podman"; then \
+ export KIND_EXPERIMENTAL_PROVIDER=podman; \
+ fi; \
+ if $(KIND) get clusters 2>/dev/null | grep -q "^$(KIND_CLUSTER_NAME)$$"; then \
+ echo "Kind cluster '$(KIND_CLUSTER_NAME)' already exists, skipping creation"; \
+ else \
+ echo "Creating Kind cluster '$(KIND_CLUSTER_NAME)'..."; \
+ $(KIND) create cluster --name $(KIND_CLUSTER_NAME) --config dev/config/kind/cluster.yaml; \
+ echo "Adding ingress-ready label to control-plane node..."; \
+ kubectl label node $(KIND_CLUSTER_NAME)-control-plane ingress-ready=true --overwrite; \
+ echo "Installing nginx ingress controller..."; \
+ kubectl apply -f dev/config/ingress/nginx-ingress.yaml; \
+ echo "Waiting for ingress controller to be ready..."; \
+ kubectl wait --namespace ingress-nginx --for=condition=ready pod --selector=app.kubernetes.io/component=controller --timeout=90s; \
+ echo "✅ Ingress controller ready"; \
+ echo "Installing cert-manager..."; \
+ kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.2/cert-manager.yaml; \
+ echo "Waiting for cert-manager to be ready..."; \
+ kubectl wait --namespace cert-manager --for=condition=available deployment/cert-manager --timeout=120s; \
+ kubectl wait --namespace cert-manager --for=condition=available deployment/cert-manager-cainjector --timeout=120s; \
+ kubectl wait --namespace cert-manager --for=condition=available deployment/cert-manager-webhook --timeout=120s; \
+ echo "✅ cert-manager ready"; \
+ echo "Creating cert-manager ClusterIssuer..."; \
+ sleep 5; \
+ kubectl apply -f dev/config/cert-manager/selfsigned-issuer.yaml; \
+ echo "✅ ClusterIssuer created"; \
+ echo "Adding /etc/hosts entry for Keycloak in control plane..."; \
+ if command -v docker >/dev/null 2>&1 && docker ps --filter "name=$(KIND_CLUSTER_NAME)-control-plane" --format "{{.Names}}" | grep -q "$(KIND_CLUSTER_NAME)-control-plane"; then \
+ docker exec $(KIND_CLUSTER_NAME)-control-plane bash -c 'grep -q "keycloak.127-0-0-1.sslip.io" /etc/hosts || echo "127.0.0.1 keycloak.127-0-0-1.sslip.io" >> /etc/hosts'; \
+ elif command -v podman >/dev/null 2>&1 && podman ps --filter "name=$(KIND_CLUSTER_NAME)-control-plane" --format "{{.Names}}" | grep -q "$(KIND_CLUSTER_NAME)-control-plane"; then \
+ podman exec $(KIND_CLUSTER_NAME)-control-plane bash -c 'grep -q "keycloak.127-0-0-1.sslip.io" /etc/hosts || echo "127.0.0.1 keycloak.127-0-0-1.sslip.io" >> /etc/hosts'; \
+ fi; \
+ echo "✅ /etc/hosts entry added"; \
+ fi
+
+.PHONY: kind-delete-cluster
+kind-delete-cluster: kind
+ @# Set KIND provider for podman on Linux
+ @if [ "$(shell uname -s)" != "Darwin" ] && echo "$(CONTAINER_ENGINE)" | grep -q "podman"; then \
+ export KIND_EXPERIMENTAL_PROVIDER=podman; \
+ fi; \
+ $(KIND) delete cluster --name $(KIND_CLUSTER_NAME)
diff --git a/build/tools.mk b/build/tools.mk
new file mode 100644
index 00000000..20482bc9
--- /dev/null
+++ b/build/tools.mk
@@ -0,0 +1,20 @@
+# Tools
+
+# Platform detection
+OS := $(shell uname -s | tr '[:upper:]' '[:lower:]')
+ARCH := $(shell uname -m | tr '[:upper:]' '[:lower:]')
+ifeq ($(ARCH),x86_64)
+ ARCH = amd64
+endif
+ifeq ($(ARCH),aarch64)
+ ARCH = arm64
+endif
+
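+# kind is built from source with `go install`, which handles the host platform
+# itself; the OS/ARCH detection above is available for tools shipped as prebuilt binaries.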
+KIND = _output/bin/kind
+KIND_VERSION = v0.30.0
+$(KIND):
+ @mkdir -p _output/bin
+ GOBIN=$(PWD)/_output/bin go install sigs.k8s.io/kind@$(KIND_VERSION)
+
+.PHONY: kind
+kind: $(KIND)
diff --git a/dev/config/cert-manager/selfsigned-issuer.yaml b/dev/config/cert-manager/selfsigned-issuer.yaml
new file mode 100644
index 00000000..8bb27f7a
--- /dev/null
+++ b/dev/config/cert-manager/selfsigned-issuer.yaml
@@ -0,0 +1,31 @@
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+ name: selfsigned-issuer
+spec:
+ selfSigned: {}
+---
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ name: selfsigned-ca
+ namespace: cert-manager
+spec:
+ isCA: true
+ commonName: selfsigned-ca
+ secretName: selfsigned-ca-secret
+ privateKey:
+ algorithm: ECDSA
+ size: 256
+ issuerRef:
+ name: selfsigned-issuer
+ kind: ClusterIssuer
+ group: cert-manager.io
+---
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+ name: selfsigned-ca-issuer
+spec:
+ ca:
+ secretName: selfsigned-ca-secret
diff --git a/dev/config/ingress/nginx-ingress.yaml b/dev/config/ingress/nginx-ingress.yaml
new file mode 100644
index 00000000..8405740d
--- /dev/null
+++ b/dev/config/ingress/nginx-ingress.yaml
@@ -0,0 +1,386 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: ingress-nginx
+ labels:
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/instance: ingress-nginx
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/component: controller
+ name: ingress-nginx
+ namespace: ingress-nginx
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ labels:
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/component: controller
+ name: ingress-nginx-controller
+ namespace: ingress-nginx
+data:
+ allow-snippet-annotations: "true"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/instance: ingress-nginx
+ name: ingress-nginx
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - endpoints
+ - nodes
+ - pods
+ - secrets
+ - namespaces
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingresses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingresses/status
+ verbs:
+ - update
+ - apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingressclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - discovery.k8s.io
+ resources:
+ - endpointslices
+ verbs:
+ - list
+ - watch
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/instance: ingress-nginx
+ name: ingress-nginx
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ingress-nginx
+subjects:
+ - kind: ServiceAccount
+ name: ingress-nginx
+ namespace: ingress-nginx
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ labels:
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/component: controller
+ name: ingress-nginx
+ namespace: ingress-nginx
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - pods
+ - secrets
+ - endpoints
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingresses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingresses/status
+ verbs:
+ - update
+ - apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingressclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ resourceNames:
+ - ingress-nginx-leader
+ verbs:
+ - get
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - apiGroups:
+ - discovery.k8s.io
+ resources:
+ - endpointslices
+ verbs:
+ - list
+ - watch
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/component: controller
+ name: ingress-nginx
+ namespace: ingress-nginx
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: ingress-nginx
+subjects:
+ - kind: ServiceAccount
+ name: ingress-nginx
+ namespace: ingress-nginx
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/component: controller
+ name: ingress-nginx-controller
+ namespace: ingress-nginx
+spec:
+ type: NodePort
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: http
+ appProtocol: http
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: https
+ appProtocol: https
+ selector:
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/component: controller
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/component: controller
+ name: ingress-nginx-controller
+ namespace: ingress-nginx
+spec:
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/component: controller
+ replicas: 1
+ revisionHistoryLimit: 10
+ minReadySeconds: 0
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/component: controller
+ spec:
+ dnsPolicy: ClusterFirst
+ containers:
+ - name: controller
+ image: registry.k8s.io/ingress-nginx/controller:v1.11.1
+ imagePullPolicy: IfNotPresent
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /wait-shutdown
+ args:
+ - /nginx-ingress-controller
+ - --election-id=ingress-nginx-leader
+ - --controller-class=k8s.io/ingress-nginx
+ - --ingress-class=nginx
+ - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
+ - --watch-ingress-without-class=true
+ securityContext:
+ runAsNonRoot: true
+ runAsUser: 101
+ allowPrivilegeEscalation: false
+ seccompProfile:
+ type: RuntimeDefault
+ capabilities:
+ drop:
+ - ALL
+ add:
+ - NET_BIND_SERVICE
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: LD_PRELOAD
+ value: /usr/local/lib/libmimalloc.so
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: 10254
+ scheme: HTTP
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /healthz
+ port: 10254
+ scheme: HTTP
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ ports:
+ - name: http
+ containerPort: 80
+ protocol: TCP
+ hostPort: 80
+ - name: https
+ containerPort: 443
+ protocol: TCP
+ hostPort: 443
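+          # Also publish HTTPS on node port 8443, so processes on the kind node
+          # itself (e.g. the API server resolving keycloak.127-0-0-1.sslip.io via
+          # its /etc/hosts entry) can reach the ingress at the :8443 URLs used in
+          # the dev setup.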
+ - name: https-alt
+ containerPort: 443
+ protocol: TCP
+ hostPort: 8443
+ - name: webhook
+ containerPort: 8443
+ protocol: TCP
+ resources:
+ requests:
+ cpu: 100m
+ memory: 90Mi
+ nodeSelector:
+ ingress-ready: "true"
+ kubernetes.io/os: linux
+ serviceAccountName: ingress-nginx
+ terminationGracePeriodSeconds: 0
+ tolerations:
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ operator: Equal
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ operator: Equal
+---
+apiVersion: networking.k8s.io/v1
+kind: IngressClass
+metadata:
+ labels:
+ app.kubernetes.io/name: ingress-nginx
+ app.kubernetes.io/instance: ingress-nginx
+ app.kubernetes.io/component: controller
+ name: nginx
+spec:
+ controller: k8s.io/ingress-nginx
diff --git a/dev/config/keycloak/client-scopes/groups.json b/dev/config/keycloak/client-scopes/groups.json
new file mode 100644
index 00000000..4eb20b74
--- /dev/null
+++ b/dev/config/keycloak/client-scopes/groups.json
@@ -0,0 +1,8 @@
+{
+ "name": "groups",
+ "protocol": "openid-connect",
+ "attributes": {
+ "display.on.consent.screen": "false",
+ "include.in.token.scope": "true"
+ }
+}
diff --git a/dev/config/keycloak/client-scopes/mcp-openshift.json b/dev/config/keycloak/client-scopes/mcp-openshift.json
new file mode 100644
index 00000000..39f55e7b
--- /dev/null
+++ b/dev/config/keycloak/client-scopes/mcp-openshift.json
@@ -0,0 +1,8 @@
+{
+ "name": "mcp:openshift",
+ "protocol": "openid-connect",
+ "attributes": {
+ "display.on.consent.screen": "false",
+ "include.in.token.scope": "true"
+ }
+}
diff --git a/dev/config/keycloak/client-scopes/mcp-server.json b/dev/config/keycloak/client-scopes/mcp-server.json
new file mode 100644
index 00000000..5ac0440b
--- /dev/null
+++ b/dev/config/keycloak/client-scopes/mcp-server.json
@@ -0,0 +1,8 @@
+{
+ "name": "mcp-server",
+ "protocol": "openid-connect",
+ "attributes": {
+ "display.on.consent.screen": "false",
+ "include.in.token.scope": "true"
+ }
+}
diff --git a/dev/config/keycloak/clients/mcp-client.json b/dev/config/keycloak/clients/mcp-client.json
new file mode 100644
index 00000000..7f2c596e
--- /dev/null
+++ b/dev/config/keycloak/clients/mcp-client.json
@@ -0,0 +1,13 @@
+{
+ "clientId": "mcp-client",
+ "enabled": true,
+ "publicClient": true,
+ "standardFlowEnabled": true,
+ "directAccessGrantsEnabled": true,
+ "serviceAccountsEnabled": false,
+ "authorizationServicesEnabled": false,
+ "redirectUris": ["*"],
+ "webOrigins": ["*"],
+ "defaultClientScopes": ["profile", "email"],
+ "optionalClientScopes": ["mcp-server"]
+}
diff --git a/dev/config/keycloak/clients/mcp-server-update.json b/dev/config/keycloak/clients/mcp-server-update.json
new file mode 100644
index 00000000..2709e75c
--- /dev/null
+++ b/dev/config/keycloak/clients/mcp-server-update.json
@@ -0,0 +1,20 @@
+{
+ "clientId": "mcp-server",
+ "enabled": true,
+ "publicClient": false,
+ "standardFlowEnabled": true,
+ "directAccessGrantsEnabled": true,
+ "serviceAccountsEnabled": true,
+ "authorizationServicesEnabled": false,
+ "redirectUris": ["*"],
+ "webOrigins": ["*"],
+ "defaultClientScopes": ["profile", "email", "groups", "mcp-server"],
+ "optionalClientScopes": ["mcp:openshift"],
+ "attributes": {
+ "oauth2.device.authorization.grant.enabled": "false",
+ "oidc.ciba.grant.enabled": "false",
+ "backchannel.logout.session.required": "true",
+ "backchannel.logout.revoke.offline.tokens": "false",
+ "standard.token.exchange.enabled": "true"
+ }
+}
diff --git a/dev/config/keycloak/clients/mcp-server.json b/dev/config/keycloak/clients/mcp-server.json
new file mode 100644
index 00000000..873fa5ce
--- /dev/null
+++ b/dev/config/keycloak/clients/mcp-server.json
@@ -0,0 +1,19 @@
+{
+ "clientId": "mcp-server",
+ "enabled": true,
+ "publicClient": false,
+ "standardFlowEnabled": true,
+ "directAccessGrantsEnabled": true,
+ "serviceAccountsEnabled": true,
+ "authorizationServicesEnabled": false,
+ "redirectUris": ["*"],
+ "webOrigins": ["*"],
+ "defaultClientScopes": ["profile", "email", "groups", "mcp-server"],
+ "optionalClientScopes": ["mcp:openshift"],
+ "attributes": {
+ "oauth2.device.authorization.grant.enabled": "false",
+ "oidc.ciba.grant.enabled": "false",
+ "backchannel.logout.session.required": "true",
+ "backchannel.logout.revoke.offline.tokens": "false"
+ }
+}
diff --git a/dev/config/keycloak/clients/openshift.json b/dev/config/keycloak/clients/openshift.json
new file mode 100644
index 00000000..2905c437
--- /dev/null
+++ b/dev/config/keycloak/clients/openshift.json
@@ -0,0 +1,13 @@
+{
+ "clientId": "openshift",
+ "enabled": true,
+ "publicClient": false,
+ "standardFlowEnabled": true,
+ "directAccessGrantsEnabled": true,
+ "serviceAccountsEnabled": true,
+ "authorizationServicesEnabled": false,
+ "redirectUris": ["*"],
+ "webOrigins": ["*"],
+ "defaultClientScopes": ["profile", "email", "groups"],
+ "optionalClientScopes": []
+}
diff --git a/dev/config/keycloak/deployment.yaml b/dev/config/keycloak/deployment.yaml
new file mode 100644
index 00000000..efcb7e0f
--- /dev/null
+++ b/dev/config/keycloak/deployment.yaml
@@ -0,0 +1,71 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: keycloak
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: keycloak
+ namespace: keycloak
+ labels:
+ app: keycloak
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: keycloak
+ template:
+ metadata:
+ labels:
+ app: keycloak
+ spec:
+ containers:
+ - name: keycloak
+ image: quay.io/keycloak/keycloak:26.4
+ args: ["start-dev"]
+ env:
+ - name: KC_BOOTSTRAP_ADMIN_USERNAME
+ value: "admin"
+ - name: KC_BOOTSTRAP_ADMIN_PASSWORD
+ value: "admin"
+ - name: KC_HOSTNAME
+ value: "https://keycloak.127-0-0-1.sslip.io:8443"
+ - name: KC_HTTP_ENABLED
+ value: "true"
+ - name: KC_HEALTH_ENABLED
+ value: "true"
+ - name: KC_PROXY_HEADERS
+ value: "xforwarded"
+ ports:
+ - name: http
+ containerPort: 8080
+ readinessProbe:
+ httpGet:
+ path: /health/ready
+ port: 9000
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ livenessProbe:
+ httpGet:
+ path: /health/live
+ port: 9000
+ initialDelaySeconds: 60
+ periodSeconds: 30
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: keycloak
+ namespace: keycloak
+ labels:
+ app: keycloak
+spec:
+ ports:
+ - name: http
+ port: 80
+ targetPort: 8080
+ selector:
+ app: keycloak
+ type: ClusterIP
diff --git a/dev/config/keycloak/ingress.yaml b/dev/config/keycloak/ingress.yaml
new file mode 100644
index 00000000..d172e091
--- /dev/null
+++ b/dev/config/keycloak/ingress.yaml
@@ -0,0 +1,34 @@
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: keycloak
+ namespace: keycloak
+ labels:
+ app: keycloak
+ annotations:
+ cert-manager.io/cluster-issuer: "selfsigned-ca-issuer"
+ nginx.ingress.kubernetes.io/ssl-redirect: "true"
+ nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
+ # Required for Keycloak 26.2.0+ to include port in issuer URLs
+ nginx.ingress.kubernetes.io/configuration-snippet: |
+ proxy_set_header X-Forwarded-Proto https;
+ proxy_set_header X-Forwarded-Port 8443;
+ proxy_set_header X-Forwarded-Host $host:8443;
+spec:
+ ingressClassName: nginx
+ tls:
+ - hosts:
+ - keycloak.127-0-0-1.sslip.io
+ secretName: keycloak-tls-cert
+ rules:
+ - host: keycloak.127-0-0-1.sslip.io
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: keycloak
+ port:
+ number: 80
diff --git a/dev/config/keycloak/mappers/groups-membership.json b/dev/config/keycloak/mappers/groups-membership.json
new file mode 100644
index 00000000..266a66e9
--- /dev/null
+++ b/dev/config/keycloak/mappers/groups-membership.json
@@ -0,0 +1,12 @@
+{
+ "name": "groups",
+ "protocol": "openid-connect",
+ "protocolMapper": "oidc-group-membership-mapper",
+ "config": {
+ "claim.name": "groups",
+ "full.path": "false",
+ "id.token.claim": "true",
+ "access.token.claim": "true",
+ "userinfo.token.claim": "true"
+ }
+}
diff --git a/dev/config/keycloak/mappers/mcp-server-audience.json b/dev/config/keycloak/mappers/mcp-server-audience.json
new file mode 100644
index 00000000..37b7e969
--- /dev/null
+++ b/dev/config/keycloak/mappers/mcp-server-audience.json
@@ -0,0 +1,10 @@
+{
+ "name": "mcp-server-audience",
+ "protocol": "openid-connect",
+ "protocolMapper": "oidc-audience-mapper",
+ "config": {
+ "included.client.audience": "mcp-server",
+ "id.token.claim": "true",
+ "access.token.claim": "true"
+ }
+}
diff --git a/dev/config/keycloak/mappers/openshift-audience.json b/dev/config/keycloak/mappers/openshift-audience.json
new file mode 100644
index 00000000..74b84b71
--- /dev/null
+++ b/dev/config/keycloak/mappers/openshift-audience.json
@@ -0,0 +1,10 @@
+{
+ "name": "openshift-audience",
+ "protocol": "openid-connect",
+ "protocolMapper": "oidc-audience-mapper",
+ "config": {
+ "included.client.audience": "openshift",
+ "id.token.claim": "true",
+ "access.token.claim": "true"
+ }
+}
diff --git a/dev/config/keycloak/mappers/username.json b/dev/config/keycloak/mappers/username.json
new file mode 100644
index 00000000..d76ccfa2
--- /dev/null
+++ b/dev/config/keycloak/mappers/username.json
@@ -0,0 +1,13 @@
+{
+ "name": "username",
+ "protocol": "openid-connect",
+ "protocolMapper": "oidc-usermodel-property-mapper",
+ "config": {
+ "userinfo.token.claim": "true",
+ "user.attribute": "username",
+ "id.token.claim": "true",
+ "access.token.claim": "true",
+ "claim.name": "preferred_username",
+ "jsonType.label": "String"
+ }
+}
diff --git a/dev/config/keycloak/rbac.yaml b/dev/config/keycloak/rbac.yaml
new file mode 100644
index 00000000..6f3f8c75
--- /dev/null
+++ b/dev/config/keycloak/rbac.yaml
@@ -0,0 +1,20 @@
+# RBAC ClusterRoleBinding for mcp user with OIDC authentication
+#
+# IMPORTANT: This requires Kubernetes API server to be configured with OIDC:
+# --oidc-issuer-url=https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift
+# --oidc-username-claim=preferred_username
+#
+# Without OIDC configuration, this binding will not work.
+#
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: oidc-mcp-cluster-admin
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cluster-admin
+subjects:
+- apiGroup: rbac.authorization.k8s.io
+ kind: User
+ name: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift#mcp
diff --git a/dev/config/keycloak/realm/realm-create.json b/dev/config/keycloak/realm/realm-create.json
new file mode 100644
index 00000000..d651e7dd
--- /dev/null
+++ b/dev/config/keycloak/realm/realm-create.json
@@ -0,0 +1,4 @@
+{
+ "realm": "openshift",
+ "enabled": true
+}
diff --git a/dev/config/keycloak/realm/realm-events-config.json b/dev/config/keycloak/realm/realm-events-config.json
new file mode 100644
index 00000000..72b07a5b
--- /dev/null
+++ b/dev/config/keycloak/realm/realm-events-config.json
@@ -0,0 +1,8 @@
+{
+ "realm": "openshift",
+ "enabled": true,
+ "eventsEnabled": true,
+ "eventsListeners": ["jboss-logging"],
+ "adminEventsEnabled": true,
+ "adminEventsDetailsEnabled": true
+}
diff --git a/dev/config/keycloak/users/mcp.json b/dev/config/keycloak/users/mcp.json
new file mode 100644
index 00000000..b84bc3f2
--- /dev/null
+++ b/dev/config/keycloak/users/mcp.json
@@ -0,0 +1,15 @@
+{
+ "username": "mcp",
+ "email": "mcp@example.com",
+ "firstName": "MCP",
+ "lastName": "User",
+ "enabled": true,
+ "emailVerified": true,
+ "credentials": [
+ {
+ "type": "password",
+ "value": "mcp",
+ "temporary": false
+ }
+ ]
+}
diff --git a/dev/config/kind/cluster.yaml b/dev/config/kind/cluster.yaml
new file mode 100644
index 00000000..fda11689
--- /dev/null
+++ b/dev/config/kind/cluster.yaml
@@ -0,0 +1,30 @@
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+ extraMounts:
+ - hostPath: ./_output/cert-manager-ca/ca.crt
+ containerPath: /etc/kubernetes/pki/keycloak-ca.crt
+ readOnly: true
+ kubeadmConfigPatches:
+ - |
+ kind: InitConfiguration
+ nodeRegistration:
+ kubeletExtraArgs:
+ node-labels: "ingress-ready=true"
+
+ kind: ClusterConfiguration
+ apiServer:
+ extraArgs:
+ oidc-issuer-url: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift
+ oidc-client-id: openshift
+ oidc-username-claim: preferred_username
+ oidc-groups-claim: groups
+ oidc-ca-file: /etc/kubernetes/pki/keycloak-ca.crt
+ extraPortMappings:
+ - containerPort: 80
+ hostPort: 8000
+ protocol: TCP
+ - containerPort: 443
+ hostPort: 8443
+ protocol: TCP
diff --git a/docs/GETTING_STARTED_CLAUDE_CODE.md b/docs/GETTING_STARTED_CLAUDE_CODE.md
new file mode 100644
index 00000000..b8e2985a
--- /dev/null
+++ b/docs/GETTING_STARTED_CLAUDE_CODE.md
@@ -0,0 +1,99 @@
+# Using Kubernetes MCP Server with Claude Code CLI
+
+This guide shows you how to configure the Kubernetes MCP Server with Claude Code CLI.
+
+> **Prerequisites:** Complete the [Getting Started with Kubernetes](GETTING_STARTED_KUBERNETES.md) guide first to create a ServiceAccount and kubeconfig file.
+
+## Quick Setup
+
+Add the MCP server using the `claude mcp add-json` command:
+
+```bash
+claude mcp add-json kubernetes-mcp-server \
+ '{"command":"npx","args":["-y","kubernetes-mcp-server@latest","--read-only"],"env":{"KUBECONFIG":"'${HOME}'/.kube/mcp-viewer.kubeconfig"}}' \
+ -s user
+```
+
+**What this does:**
+- Adds the Kubernetes MCP Server to your Claude Code configuration
+- Uses `npx` to automatically download and run the latest version
+- Enables read-only mode for safety
+- Uses the kubeconfig file you created in the Getting Started guide
+- `-s user` makes it available in all your projects
+
+## Manual Configuration (Alternative)
+
+If you prefer to edit the config file manually:
+
+**Location:** `~/.config/claude-code/config.toml`
+
+```toml
+[[mcp_servers]]
+name = "kubernetes-mcp-server"
+command = "npx"
+args = [
+ "-y",
+ "kubernetes-mcp-server@latest",
+ "--read-only"
+]
+
+[mcp_servers.env]
+KUBECONFIG = "/home/YOUR_USERNAME/.kube/mcp-viewer.kubeconfig"
+```
+
+**Important:** Replace `/home/YOUR_USERNAME/` with your actual home directory path.
+
+## Verify Connection
+
+After adding the MCP server, verify it's connected:
+
+```bash
+claude mcp list
+```
+
+Expected output:
+```
+Checking MCP server health...
+
+kubernetes-mcp-server: npx -y kubernetes-mcp-server@latest --read-only - ✓ Connected
+```
+
+## Using the MCP Server
+
+Once connected, interact with your Kubernetes cluster using natural language! Claude will use the Kubernetes MCP Server
+to fetch and display the relevant information from your cluster:
+
+```bash
+> List all namespaces in my cluster
+
+● I'll list all the namespaces in your Kubernetes cluster.
+ ⎿ APIVERSION KIND NAME STATUS AGE LABELS
+ v1 Namespace default Active 77m kubernetes.io/metadata.name=default
+ v1 Namespace kube-node-lease Active 77m kubernetes.io/metadata.name=kube-node-lease
+ … +4 lines (ctrl+o to expand)
+
+● Your cluster has 6 namespaces:
+
+ 1. default - The default namespace for resources without a specified namespace
+ 2. kube-node-lease - Used for node heartbeat/lease objects
+ 3. kube-public - Publicly accessible namespace, typically for cluster information
+ 4. kube-system - System namespace for Kubernetes control plane components
+ 5. local-path-storage - Likely used for local storage provisioning
+ 6. mcp - Custom namespace (created ~75 minutes ago)
+
+ All namespaces are in Active status and the cluster appears to be running for about 77 minutes.
+```
+
+## Configuration Options
+
+Common command-line flags you can add to the `args` array:
+
+| Flag | Description |
+|------|-------------|
+| `--read-only` | Enable read-only mode (recommended) |
+| `--kubeconfig <path>` | Path to kubeconfig file (or use the `KUBECONFIG` env var) |
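+
+For example, to pin the kubeconfig with the flag instead of the `KUBECONFIG` environment variable (same file as in the Quick Setup):
+
+```bash
+claude mcp add-json kubernetes-mcp-server \
+  '{"command":"npx","args":["-y","kubernetes-mcp-server@latest","--read-only","--kubeconfig","'${HOME}'/.kube/mcp-viewer.kubeconfig"]}' \
+  -s user
+```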
+
+## Next Steps
+
+- Review the [Getting Started with Kubernetes](GETTING_STARTED_KUBERNETES.md) guide for more details on ServiceAccount setup
+- Explore the [main README](../README.md) for more MCP server capabilities
diff --git a/docs/GETTING_STARTED_KUBERNETES.md b/docs/GETTING_STARTED_KUBERNETES.md
new file mode 100644
index 00000000..9f3613bf
--- /dev/null
+++ b/docs/GETTING_STARTED_KUBERNETES.md
@@ -0,0 +1,242 @@
+# Getting Started with Kubernetes MCP Server
+
+This guide walks you through the foundational setup for using the Kubernetes MCP Server with your Kubernetes cluster. You'll create a dedicated, read-only ServiceAccount and generate a secure kubeconfig file that can be used with various MCP clients.
+
+> **Note:** This setup is **recommended for production use** but not strictly required. The MCP Server can use your existing kubeconfig file (e.g., `~/.kube/config`), but using a dedicated ServiceAccount with limited permissions follows the principle of least privilege and is more secure.
+
+> **Next:** After completing this guide, continue with the [Claude Code CLI guide](GETTING_STARTED_CLAUDE_CODE.md). See the [docs README](README.md) for all available guides.
+
+## What You'll Create
+
+By the end of this guide, you'll have:
+- A dedicated `mcp-viewer` ServiceAccount with read-only cluster access
+- A secure, time-bound authentication token
+- A dedicated kubeconfig file (`~/.kube/mcp-viewer.kubeconfig`)
+
+## Prerequisites
+
+- A running Kubernetes cluster
+- `kubectl` CLI installed and configured
+- Cluster admin permissions to create ServiceAccounts and RBAC bindings
+
+## 1. Create a Read-Only ServiceAccount and RBAC
+
+A ServiceAccount represents a non-human identity. Binding it to a read-only role lets tools query the cluster safely without using administrator credentials.
+
+### Step 1.1: Create the Namespace and ServiceAccount
+
+First, create a Namespace for the ServiceAccount:
+
+```bash
+# Create or pick a Namespace for the ServiceAccount
+kubectl create namespace mcp
+
+# Create the ServiceAccount
+kubectl create serviceaccount mcp-viewer -n mcp
+```
+
+### Step 1.2: Grant Read-Only Access (RBAC)
+
+Use a ClusterRoleBinding or RoleBinding to grant read-only permissions.
+
+#### Option A: Cluster-Wide Read-Only (Most Common)
+
+This binds the ServiceAccount to the built-in `view` ClusterRole, which provides read-only access across the whole cluster.
+
+```bash
+# Binds the view ClusterRole to the ServiceAccount
+kubectl create clusterrolebinding mcp-viewer-crb \
+ --clusterrole=view \
+ --serviceaccount=mcp:mcp-viewer
+```
+
+#### Option B: Namespace-Scoped Only (Tighter Scope)
+
+This limits read access to the `mcp` namespace only, by binding the built-in `view` ClusterRole through a namespace-scoped RoleBinding.
+
+```bash
+# Binds the view ClusterRole to the ServiceAccount, scoped to the 'mcp' namespace
+kubectl create rolebinding mcp-viewer-rb \
+ --clusterrole=view \
+ --serviceaccount=mcp:mcp-viewer \
+ -n mcp
+```
+
+### Quick Verification (Optional)
+
+Verify the permissions granted to the ServiceAccount:
+
+```bash
+# Check if the ServiceAccount can list pods cluster-wide
+# Expect 'yes' if you used the view ClusterRole (Option A)
+kubectl auth can-i list pods --as=system:serviceaccount:mcp:mcp-viewer --all-namespaces
+```
+
+## 2. Mint a ServiceAccount Token
+
+Tools authenticate via a bearer token. We use the TokenRequest API (`kubectl create token`) to generate a secure, short-lived token.
+
+```bash
+# Create a time-bound token (choose a duration, e.g., 2 hours)
+TOKEN="$(kubectl create token mcp-viewer --duration=2h -n mcp)"
+
+# Verify the token was generated (Optional)
+echo "$TOKEN"
+```
+
+**Note:** The `kubectl create token` command requires Kubernetes v1.24+. For older versions, you'll need to extract the token from the ServiceAccount's secret.
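+
+For pre-v1.24 clusters, here is a minimal sketch of the legacy approach (it assumes the auto-created token Secret that older clusters attach to the ServiceAccount):
+
+```bash
+# Look up the auto-generated token Secret attached to the ServiceAccount (pre-v1.24 only)
+SECRET_NAME="$(kubectl get serviceaccount mcp-viewer -n mcp -o jsonpath='{.secrets[0].name}')"
+
+# Decode the long-lived token stored in the Secret
+TOKEN="$(kubectl get secret "$SECRET_NAME" -n mcp -o jsonpath='{.data.token}' | base64 -d)"
+```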
+
+## 3. Build a Dedicated Kubeconfig
+
+A dedicated kubeconfig file isolates this ServiceAccount's credentials from your personal admin credentials and gives external tools a single file to point at.
+
+### Step 3.1: Get Cluster Details
+
+Get the API server address and certificate authority from your current active context:
+
+```bash
+# 1. Get the current cluster API server address
+API_SERVER="$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')"
+
+# 2. Get the cluster's Certificate Authority (CA) file path or data
+# First, try to get the CA file path
+CA_FILE="$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.certificate-authority}')"
+
+# If CA file is not set, extract the CA data and write it to a temp file
+if [ -z "$CA_FILE" ]; then
+ CA_FILE="/tmp/k8s-ca-$$.crt"
+ kubectl config view --minify --raw -o jsonpath='{.clusters[0].cluster.certificate-authority-data}' | base64 -d > "$CA_FILE"
+fi
+
+# 3. Define the desired context name
+CONTEXT_NAME="mcp-viewer-context"
+KUBECONFIG_FILE="$HOME/.kube/mcp-viewer.kubeconfig"
+```
+
+### Step 3.2: Create and Configure the Kubeconfig File
+
+Create the new kubeconfig file by defining the cluster, user (the ServiceAccount), and context:
+
+```bash
+# Create a new kubeconfig file with cluster configuration
+kubectl config --kubeconfig="$KUBECONFIG_FILE" set-cluster mcp-viewer-cluster \
+ --server="$API_SERVER" \
+ --certificate-authority="$CA_FILE" \
+ --embed-certs=true
+
+# Set the ServiceAccount token as the user credential
+kubectl config --kubeconfig="$KUBECONFIG_FILE" set-credentials mcp-viewer \
+ --token="$TOKEN"
+
+# Define the context (links the cluster and user)
+kubectl config --kubeconfig="$KUBECONFIG_FILE" set-context "$CONTEXT_NAME" \
+ --cluster=mcp-viewer-cluster \
+ --user=mcp-viewer
+
+# Set the new context as current
+kubectl config --kubeconfig="$KUBECONFIG_FILE" use-context "$CONTEXT_NAME"
+
+# Secure the file permissions
+chmod 600 "$KUBECONFIG_FILE"
+
+# Clean up temporary CA file if we created one
+if [[ "$CA_FILE" == /tmp/k8s-ca-*.crt ]]; then
+ rm -f "$CA_FILE"
+fi
+```
+
+### Quick Sanity Check
+
+You can now use this new file to verify access:
+
+```bash
+# Run a command using the dedicated kubeconfig file
+kubectl --kubeconfig="$KUBECONFIG_FILE" get pods -A
+```
+
+This command should successfully list all Pods if you chose **Option A: Cluster-Wide Read-Only**, proving the ServiceAccount and its token are correctly configured.
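+
+If you chose **Option B: Namespace-Scoped Only**, scope the check to the `mcp` namespace instead:
+
+```bash
+# Read access is limited to the 'mcp' namespace under Option B
+kubectl --kubeconfig="$KUBECONFIG_FILE" get pods -n mcp
+```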
+
+## 4. Use with Kubernetes MCP Server
+
+Now that you have a dedicated kubeconfig file, you can use it with the Kubernetes MCP Server:
+
+```bash
+# Run the MCP server with the dedicated kubeconfig
+./kubernetes-mcp-server --kubeconfig="$HOME/.kube/mcp-viewer.kubeconfig"
+
+# Or use npx
+npx -y kubernetes-mcp-server@latest --kubeconfig="$HOME/.kube/mcp-viewer.kubeconfig"
+
+# Or use uvx
+uvx kubernetes-mcp-server@latest --kubeconfig="$HOME/.kube/mcp-viewer.kubeconfig"
+```
+
+Alternatively, you can set the `KUBECONFIG` environment variable:
+
+```bash
+export KUBECONFIG="$HOME/.kube/mcp-viewer.kubeconfig"
+./kubernetes-mcp-server
+```
+
+## Token Expiration and Renewal
+
+The token created in Step 2 has a limited lifetime (2 hours in the example). When it expires, you'll need to:
+
+1. Generate a new token:
+ ```bash
+ TOKEN="$(kubectl create token mcp-viewer --duration=2h -n mcp)"
+ ```
+
+2. Update the kubeconfig file:
+ ```bash
+ kubectl config --kubeconfig="$KUBECONFIG_FILE" set-credentials mcp-viewer --token="$TOKEN"
+ ```
+
+For long-running applications, consider:
+- Using a longer token duration (up to the cluster's maximum, typically 24h)
+- Implementing automatic token renewal in your application (see the sketch below)
+- Using a different authentication method (e.g., client certificates)
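+
+As a minimal sketch, the two renewal steps can be combined into a small script you run manually or from cron before the token expires (the `renew-mcp-token.sh` name is illustrative; the commands and paths match the steps above):
+
+```bash
+#!/bin/bash
+# renew-mcp-token.sh: re-mint the ServiceAccount token and refresh the kubeconfig
+KUBECONFIG_FILE="$HOME/.kube/mcp-viewer.kubeconfig"
+TOKEN="$(kubectl create token mcp-viewer --duration=2h -n mcp)"
+kubectl config --kubeconfig="$KUBECONFIG_FILE" set-credentials mcp-viewer --token="$TOKEN"
+```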
+
+## Cleanup
+
+To remove the ServiceAccount and associated RBAC bindings:
+
+```bash
+# Delete the ClusterRoleBinding (if using Option A)
+kubectl delete clusterrolebinding mcp-viewer-crb
+
+# Delete the RoleBinding (if using Option B)
+kubectl delete rolebinding mcp-viewer-rb -n mcp
+
+# Delete the ServiceAccount
+kubectl delete serviceaccount mcp-viewer -n mcp
+
+# Delete the namespace (optional - only if you created it specifically for this)
+kubectl delete namespace mcp
+
+# Remove the kubeconfig file
+rm "$HOME/.kube/mcp-viewer.kubeconfig"
+```
+
+## Troubleshooting
+
+### kubectl create token: command not found
+
+This command requires Kubernetes v1.24+. For older versions, extract the token from the ServiceAccount's Secret manually (see the sketch in Step 2).
+
+### Permission denied errors
+
+Ensure you're using the correct kubeconfig file and that the ServiceAccount has the necessary RBAC permissions. Verify with:
+
+```bash
+kubectl auth can-i list pods --as=system:serviceaccount:mcp:mcp-viewer --all-namespaces
+```
+
+## Next Steps
+
+Now that you have a working kubeconfig with read-only access, configure Claude Code CLI:
+
+- **[Using with Claude Code CLI](GETTING_STARTED_CLAUDE_CODE.md)** - Configure the MCP server with Claude Code CLI
+
+You can also:
+- Explore the [main README](../README.md) for more MCP server capabilities
diff --git a/docs/KEYCLOAK_OIDC_SETUP.md b/docs/KEYCLOAK_OIDC_SETUP.md
new file mode 100644
index 00000000..149324fb
--- /dev/null
+++ b/docs/KEYCLOAK_OIDC_SETUP.md
@@ -0,0 +1,221 @@
+# Keycloak OIDC Setup for Kubernetes MCP Server
+
+> **⚠️ Preview Feature**
+>
+> OIDC/OAuth authentication setup is currently in **preview**. Configuration flags or fields may change. Use for **development and testing only**.
+
+This guide shows you how to set up a local development environment with Keycloak for OIDC authentication testing.
+
+## Overview
+
+The local development environment includes:
+- **Kind cluster** with OIDC-enabled API server
+- **Keycloak** (deployed in the cluster) for OIDC provider
+- **Kubernetes MCP Server** configured for OAuth/OIDC authentication
+
+## Quick Start
+
+Set up the complete environment with one command:
+
+```bash
+make local-env-setup
+```
+
+This will:
+1. Install required tools (kind) to `./_output/bin/`
+2. Create a Kind cluster with OIDC configuration
+3. Deploy Keycloak in the cluster
+4. Configure Keycloak realm and clients
+5. Build the MCP server binary
+6. Generate a configuration file at `_output/config.toml`
+
+## Running the MCP Server
+
+After setup completes, run the server:
+
+```bash
+# Start the server
+./kubernetes-mcp-server --port 8008 --config _output/config.toml
+```
+
+Or use the MCP Inspector for testing:
+
+```bash
+npx @modelcontextprotocol/inspector@latest $(pwd)/kubernetes-mcp-server --config _output/config.toml
+```
+
+## Quick Walkthrough
+
+### 1. Start MCP Inspector and Connect
+
+After running the inspector, open the **OAuth 2.0 Flow** section under `Authentication`, set the `Client ID` to `mcp-client` and the `Scope` to `mcp-server`, then click the "Connect" button.
+
+![MCP Inspector connect settings](images/keycloak-mcp-inspector-connect.png)
+
+### 2. Login to Keycloak
+
+You'll be redirected to Keycloak. Enter the test credentials:
+- Username: `mcp`
+- Password: `mcp`
+
+![Keycloak login page](images/keycloak-login-page.png)
+
+### 3. Use MCP Tools
+
+After authentication, you can invoke the Kubernetes MCP Server's **Tools** from the MCP Inspector. In the example below, we run the `pods_list` tool to list all pods in the cluster across all namespaces.
+
+![MCP Inspector pods_list results](images/keycloak-mcp-inspector-results.png)
+
+## Architecture
+
+### Keycloak Deployment
+- Runs as a StatefulSet in the `keycloak` namespace
+- Exposed via Ingress with TLS at `https://keycloak.127-0-0-1.sslip.io:8443`
+- Uses cert-manager for TLS certificates
+- Accessible from both host and cluster pods
+
+### Kind Cluster with OIDC
+- Kubernetes API server configured with OIDC authentication
+- Points to Keycloak's `openshift` realm as the OIDC issuer
+- Validates bearer tokens against Keycloak's JWKS endpoint
+- API server trusts the cert-manager CA certificate
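+
+For reference, these are the relevant API server flags set in `dev/config/kind/cluster.yaml`:
+
+```yaml
+apiServer:
+  extraArgs:
+    oidc-issuer-url: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift
+    oidc-client-id: openshift
+    oidc-username-claim: preferred_username
+    oidc-groups-claim: groups
+    oidc-ca-file: /etc/kubernetes/pki/keycloak-ca.crt
+```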
+
+### Authentication Flow
+
+```
+User Browser
+ |
+ | 1. OAuth login (https://keycloak.127-0-0-1.sslip.io:8443)
+ v
+Keycloak
+ |
+ | 2. ID Token (aud: mcp-server)
+ v
+MCP Server
+ |
+ | 3. Token Exchange (aud: openshift)
+ v
+Keycloak
+ |
+ | 4. Exchanged Access Token
+ v
+MCP Server
+ |
+ | 5. Bearer Token in API request
+ v
+Kubernetes API Server
+ |
+ | 6. Validate token via OIDC
+ v
+Keycloak JWKS
+ |
+ | 7. Token valid, execute tool
+ v
+MCP Server → User
+```
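+
+Step 3 is a standard OAuth 2.0 token exchange (RFC 8693) against Keycloak's token endpoint. A rough sketch of the equivalent request, assuming your Keycloak version exposes standard token exchange (`$MCP_SERVER_CLIENT_SECRET` and `$SUBJECT_TOKEN` are placeholders):
+
+```bash
+curl -s --cacert _output/cert-manager-ca/ca.crt \
+  "https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift/protocol/openid-connect/token" \
+  -d "grant_type=urn:ietf:params:oauth:grant-type:token-exchange" \
+  -d "client_id=mcp-server" \
+  -d "client_secret=$MCP_SERVER_CLIENT_SECRET" \
+  -d "subject_token=$SUBJECT_TOKEN" \
+  -d "audience=openshift"
+```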
+
+## Keycloak Configuration
+
+The setup automatically configures:
+
+### Realm: `openshift`
+- Token lifespan: 30 minutes
+- Session idle timeout: 30 minutes
+
+### Clients
+
+1. **mcp-client** (public)
+ - Public client for browser-based OAuth login
+ - PKCE required for security
+ - Valid redirect URIs: `*`
+
+2. **mcp-server** (confidential)
+ - Confidential client with client secret
+ - Standard token exchange enabled
+ - Can exchange tokens with `aud: openshift`
+ - Default scopes: `openid`, `groups`, `mcp-server`
+ - Optional scopes: `mcp:openshift`
+
+3. **openshift** (confidential)
+ - Target client for token exchange
+ - Accepts exchanged tokens from `mcp-server`
+ - Used by Kubernetes API server for OIDC validation
+
+### Client Scopes
+- **mcp-server**: Default scope with audience mapper
+- **mcp:openshift**: Optional scope for token exchange with audience mapper
+- **groups**: Group membership mapper (included in tokens)
+
+### Default User
+- **Username**: `mcp`
+- **Password**: `mcp`
+- **Email**: `mcp@example.com`
+- **RBAC**: `cluster-admin` (full cluster access)
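+
+From your admin kubeconfig you can verify the user's RBAC by impersonating it (the API server prefixes OIDC usernames with the issuer URL and `#`, matching the ClusterRoleBinding subject in `dev/config/keycloak/rbac.yaml`):
+
+```bash
+kubectl auth can-i list pods --all-namespaces \
+  --as='https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift#mcp'
+```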
+
+## MCP Server Configuration
+
+The generated `_output/config.toml` includes:
+
+```toml
+require_oauth = true
+oauth_audience = "mcp-server"
+oauth_scopes = ["openid", "mcp-server"]
+validate_token = false # Validation done by K8s API server
+authorization_url = "https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift"
+
+sts_client_id = "mcp-server"
+sts_client_secret = "..." # Auto-generated
+sts_audience = "openshift" # Triggers token exchange
+sts_scopes = ["mcp:openshift"]
+
+certificate_authority = "_output/cert-manager-ca/ca.crt" # For HTTPS validation
+```
+
+## Useful Commands
+
+### Check Keycloak Status
+
+```bash
+make keycloak-status
+```
+
+Shows:
+- Keycloak pod status
+- Service endpoints
+- Access URL
+- Admin credentials
+
+### View Keycloak Logs
+
+```bash
+make keycloak-logs
+```
+
+### Access Keycloak Admin Console
+
+Open your browser to:
+```
+https://keycloak.127-0-0-1.sslip.io:8443
+```
+
+**Admin credentials:**
+- Username: `admin`
+- Password: `admin`
+
+Navigate to the `openshift` realm to view/modify the configuration.
+
+## Teardown
+
+Remove the local environment:
+
+```bash
+make local-env-teardown
+```
+
+This deletes the Kind cluster (Keycloak is removed with it).
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 00000000..0eaa634e
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,22 @@
+# Kubernetes MCP Server Documentation
+
+Welcome to the Kubernetes MCP Server documentation! This directory contains guides to help you set up and use the Kubernetes MCP Server with your Kubernetes cluster and Claude Code CLI.
+
+## Getting Started Guides
+
+Choose the guide that matches your needs:
+
+| Guide | Description | Best For |
+|-------|-------------|----------|
+| **[Getting Started with Kubernetes](GETTING_STARTED_KUBERNETES.md)** | Base setup: Create ServiceAccount, token, and kubeconfig | Everyone - **start here first** |
+| **[Using with Claude Code CLI](GETTING_STARTED_CLAUDE_CODE.md)** | Configure MCP server with Claude Code CLI | Claude Code CLI users |
+
+## Recommended Workflow
+
+1. **Complete the base setup**: Start with [Getting Started with Kubernetes](GETTING_STARTED_KUBERNETES.md) to create a ServiceAccount and kubeconfig file
+2. **Configure Claude Code**: Then follow the [Claude Code CLI guide](GETTING_STARTED_CLAUDE_CODE.md)
+
+## Additional Documentation
+
+- **[Keycloak OIDC Setup](KEYCLOAK_OIDC_SETUP.md)** - Developer guide for local Keycloak environment and testing with MCP Inspector
+- **[Main README](../README.md)** - Project overview and general information
diff --git a/docs/images/keycloak-login-page.png b/docs/images/keycloak-login-page.png
new file mode 100644
index 00000000..2e35d403
Binary files /dev/null and b/docs/images/keycloak-login-page.png differ
diff --git a/docs/images/keycloak-mcp-inspector-connect.png b/docs/images/keycloak-mcp-inspector-connect.png
new file mode 100644
index 00000000..4a582d1c
Binary files /dev/null and b/docs/images/keycloak-mcp-inspector-connect.png differ
diff --git a/docs/images/keycloak-mcp-inspector-results.png b/docs/images/keycloak-mcp-inspector-results.png
new file mode 100644
index 00000000..7498e4d1
Binary files /dev/null and b/docs/images/keycloak-mcp-inspector-results.png differ
diff --git a/go.mod b/go.mod
index c08f892e..efb19dc0 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
module github.com/containers/kubernetes-mcp-server
-go 1.24.1
+go 1.24.10
require (
github.com/BurntSushi/toml v1.5.0
@@ -8,16 +8,15 @@ require (
github.com/fsnotify/fsnotify v1.9.0
github.com/go-jose/go-jose/v4 v4.1.3
github.com/google/jsonschema-go v0.3.0
- github.com/mark3labs/mcp-go v0.41.1
+ github.com/mark3labs/mcp-go v0.43.0
github.com/pkg/errors v0.9.1
github.com/spf13/afero v1.15.0
github.com/spf13/cobra v1.10.1
github.com/spf13/pflag v1.0.10
github.com/stretchr/testify v1.11.1
- golang.org/x/net v0.46.0
- golang.org/x/oauth2 v0.32.0
- golang.org/x/sync v0.17.0
- helm.sh/helm/v3 v3.19.0
+ golang.org/x/oauth2 v0.33.0
+ golang.org/x/sync v0.18.0
+ helm.sh/helm/v3 v3.19.1
k8s.io/api v0.34.1
k8s.io/apiextensions-apiserver v0.34.1
k8s.io/apimachinery v0.34.1
@@ -27,7 +26,7 @@ require (
k8s.io/kubectl v0.34.1
k8s.io/metrics v0.34.1
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
- sigs.k8s.io/controller-runtime v0.22.2
+ sigs.k8s.io/controller-runtime v0.22.4
sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20250211091558-894df3a7e664
sigs.k8s.io/yaml v1.6.0
)
@@ -47,11 +46,11 @@ require (
github.com/buger/jsonparser v1.1.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/chai2010/gettext-go v1.0.3 // indirect
- github.com/containerd/containerd v1.7.28 // indirect
+ github.com/containerd/containerd v1.7.29 // indirect
github.com/containerd/errdefs v0.3.0 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/containerd/platforms v0.2.1 // indirect
- github.com/cyphar/filepath-securejoin v0.4.1 // indirect
+ github.com/cyphar/filepath-securejoin v0.6.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
github.com/evanphx/json-patch v5.9.11+incompatible // indirect
@@ -122,10 +121,11 @@ require (
github.com/yosida95/uritemplate/v3 v3.0.2 // indirect
go.yaml.in/yaml/v2 v2.4.2 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
- golang.org/x/crypto v0.43.0 // indirect
- golang.org/x/sys v0.37.0 // indirect
- golang.org/x/term v0.36.0 // indirect
- golang.org/x/text v0.30.0 // indirect
+ golang.org/x/crypto v0.44.0 // indirect
+ golang.org/x/net v0.47.0 // indirect
+ golang.org/x/sys v0.38.0 // indirect
+ golang.org/x/term v0.37.0 // indirect
+ golang.org/x/text v0.31.0 // indirect
golang.org/x/time v0.12.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250728155136-f173205681a0 // indirect
google.golang.org/grpc v1.72.1 // indirect
diff --git a/go.sum b/go.sum
index 4d36580c..83314406 100644
--- a/go.sum
+++ b/go.sum
@@ -40,8 +40,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chai2010/gettext-go v1.0.3 h1:9liNh8t+u26xl5ddmWLmsOsdNLwkdRTg5AG+JnTiM80=
github.com/chai2010/gettext-go v1.0.3/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA=
-github.com/containerd/containerd v1.7.28 h1:Nsgm1AtcmEh4AHAJ4gGlNSaKgXiNccU270Dnf81FQ3c=
-github.com/containerd/containerd v1.7.28/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs=
+github.com/containerd/containerd v1.7.29 h1:90fWABQsaN9mJhGkoVnuzEY+o1XDPbg9BTC9QTAHnuE=
+github.com/containerd/containerd v1.7.29/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs=
github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
@@ -55,8 +55,8 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
-github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
-github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
+github.com/cyphar/filepath-securejoin v0.6.0 h1:BtGB77njd6SVO6VztOHfPxKitJvd/VPT+OFBFMOi1Is=
+github.com/cyphar/filepath-securejoin v0.6.0/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -187,8 +187,8 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhn
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
-github.com/mark3labs/mcp-go v0.41.1 h1:w78eWfiQam2i8ICL7AL0WFiq7KHNJQ6UB53ZVtH4KGA=
-github.com/mark3labs/mcp-go v0.41.1/go.mod h1:T7tUa2jO6MavG+3P25Oy/jR7iCeJPHImCZHRymCn39g=
+github.com/mark3labs/mcp-go v0.43.0 h1:lgiKcWMddh4sngbU+hoWOZ9iAe/qp/m851RQpj3Y7jA=
+github.com/mark3labs/mcp-go v0.43.0/go.mod h1:YnJfOL382MIWDx1kMY+2zsRHU/q78dBg9aFb8W6Thdw=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
@@ -357,47 +357,47 @@ go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
-golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
+golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU=
+golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U=
-golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI=
+golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
+golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
-golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
-golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
-golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
+golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
+golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
+golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo=
+golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
-golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
+golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
+golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
-golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
-golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
+golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
+golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
+golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
-golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
+golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
+golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE=
-golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w=
+golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
+golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -425,8 +425,8 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-helm.sh/helm/v3 v3.19.0 h1:krVyCGa8fa/wzTZgqw0DUiXuRT5BPdeqE/sQXujQ22k=
-helm.sh/helm/v3 v3.19.0/go.mod h1:Lk/SfzN0w3a3C3o+TdAKrLwJ0wcZ//t1/SDXAvfgDdc=
+helm.sh/helm/v3 v3.19.1 h1:QVMzHbanyurO8oynx0drDOfG02XxSvrHqaFrf9yrMf0=
+helm.sh/helm/v3 v3.19.1/go.mod h1:gX10tB5ErM+8fr7bglUUS/UfTOO8UUTYWIBH1IYNnpE=
k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM=
k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk=
k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI=
@@ -453,8 +453,8 @@ k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc=
oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o=
-sigs.k8s.io/controller-runtime v0.22.2 h1:cK2l8BGWsSWkXz09tcS4rJh95iOLney5eawcK5A33r4=
-sigs.k8s.io/controller-runtime v0.22.2/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8=
+sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A=
+sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8=
sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20250211091558-894df3a7e664 h1:xC7x7FsPURJYhZnWHsWFd7nkdD/WRtQVWPC28FWt85Y=
sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20250211091558-894df3a7e664/go.mod h1:Cq9jUhwSYol5tNB0O/1vLYxNV9KqnhpvEa6HvJ1w0wY=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
diff --git a/hack/generate-placeholder-ca.sh b/hack/generate-placeholder-ca.sh
new file mode 100755
index 00000000..5428304d
--- /dev/null
+++ b/hack/generate-placeholder-ca.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+set -e
+
+# Generate a placeholder self-signed CA certificate for KIND cluster startup
+# This will be replaced with the real cert-manager CA after the cluster is created
+
+CERT_DIR="_output/cert-manager-ca"
+CA_CERT="$CERT_DIR/ca.crt"
+CA_KEY="$CERT_DIR/ca.key"
+
+mkdir -p "$CERT_DIR"
+
+# Generate a self-signed CA certificate (valid placeholder)
+openssl req -x509 -newkey rsa:2048 -nodes \
+ -keyout "$CA_KEY" \
+ -out "$CA_CERT" \
+ -days 365 \
+ -subj "/CN=placeholder-ca" \
+ 2>/dev/null
+
+echo "✅ Placeholder CA certificate created at $CA_CERT"
+echo "⚠️ This will be replaced with cert-manager CA after cluster creation"
diff --git a/internal/test/env.go b/internal/test/env.go
new file mode 100644
index 00000000..4d6afe7e
--- /dev/null
+++ b/internal/test/env.go
@@ -0,0 +1,15 @@
+package test
+
+import (
+ "os"
+ "strings"
+)
+
+func RestoreEnv(originalEnv []string) {
+ os.Clearenv()
+ for _, env := range originalEnv {
+ if key, value, found := strings.Cut(env, "="); found {
+ _ = os.Setenv(key, value)
+ }
+ }
+}
diff --git a/internal/test/mcp.go b/internal/test/mcp.go
index 8daaae40..174fe4eb 100644
--- a/internal/test/mcp.go
+++ b/internal/test/mcp.go
@@ -1,35 +1,41 @@
package test
import (
+ "context"
+ "net/http"
"net/http/httptest"
"testing"
"github.com/mark3labs/mcp-go/client"
+ "github.com/mark3labs/mcp-go/client/transport"
"github.com/mark3labs/mcp-go/mcp"
- "github.com/mark3labs/mcp-go/server"
"github.com/stretchr/testify/require"
- "golang.org/x/net/context"
)
+func McpInitRequest() mcp.InitializeRequest {
+ initRequest := mcp.InitializeRequest{}
+ initRequest.Params.ProtocolVersion = mcp.LATEST_PROTOCOL_VERSION
+ initRequest.Params.ClientInfo = mcp.Implementation{Name: "test", Version: "1.33.7"}
+ return initRequest
+}
+
type McpClient struct {
ctx context.Context
testServer *httptest.Server
*client.Client
}
-func NewMcpClient(t *testing.T, mcpHttpServer *server.StreamableHTTPServer) *McpClient {
+func NewMcpClient(t *testing.T, mcpHttpServer http.Handler, options ...transport.StreamableHTTPCOption) *McpClient {
require.NotNil(t, mcpHttpServer, "McpHttpServer must be provided")
var err error
ret := &McpClient{ctx: t.Context()}
ret.testServer = httptest.NewServer(mcpHttpServer)
- ret.Client, err = client.NewStreamableHttpClient(ret.testServer.URL + "/mcp")
+ options = append(options, transport.WithContinuousListening())
+ ret.Client, err = client.NewStreamableHttpClient(ret.testServer.URL+"/mcp", options...)
require.NoError(t, err, "Expected no error creating MCP client")
err = ret.Start(t.Context())
require.NoError(t, err, "Expected no error starting MCP client")
- initRequest := mcp.InitializeRequest{}
- initRequest.Params.ProtocolVersion = mcp.LATEST_PROTOCOL_VERSION
- initRequest.Params.ClientInfo = mcp.Implementation{Name: "test", Version: "1.33.7"}
- _, err = ret.Initialize(t.Context(), initRequest)
+ _, err = ret.Initialize(t.Context(), McpInitRequest())
require.NoError(t, err, "Expected no error initializing MCP client")
return ret
}
diff --git a/internal/test/mock_server.go b/internal/test/mock_server.go
index 58740ad6..e256f425 100644
--- a/internal/test/mock_server.go
+++ b/internal/test/mock_server.go
@@ -59,6 +59,10 @@ func (m *MockServer) Handle(handler http.Handler) {
m.restHandlers = append(m.restHandlers, handler.ServeHTTP)
}
+func (m *MockServer) ResetHandlers() {
+ m.restHandlers = make([]http.HandlerFunc, 0)
+}
+
func (m *MockServer) Config() *rest.Config {
return m.config
}
@@ -216,3 +220,33 @@ func (h *InOpenShiftHandler) ServeHTTP(w http.ResponseWriter, req *http.Request)
return
}
}
+
+const tokenReviewSuccessful = `
+ {
+ "kind": "TokenReview",
+ "apiVersion": "authentication.k8s.io/v1",
+ "spec": {"token": "valid-token"},
+ "status": {
+ "authenticated": true,
+ "user": {
+ "username": "test-user",
+ "groups": ["system:authenticated"]
+ },
+ "audiences": ["the-audience"]
+ }
+ }`
+
+type TokenReviewHandler struct {
+ TokenReviewed bool
+}
+
+var _ http.Handler = (*TokenReviewHandler)(nil)
+
+func (h *TokenReviewHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ if req.URL.EscapedPath() == "/apis/authentication.k8s.io/v1/tokenreviews" {
+ w.Header().Set("Content-Type", "application/json")
+ _, _ = w.Write([]byte(tokenReviewSuccessful))
+ h.TokenReviewed = true
+ return
+ }
+}
diff --git a/internal/test/test.go b/internal/test/test.go
index 03491422..c2ccec4e 100644
--- a/internal/test/test.go
+++ b/internal/test/test.go
@@ -1,9 +1,12 @@
package test
import (
+ "fmt"
+ "net"
"os"
"path/filepath"
"runtime"
+ "time"
)
func Must[T any](v T, err error) T {
@@ -19,3 +22,30 @@ func ReadFile(path ...string) string {
fileBytes := Must(os.ReadFile(filePath))
return string(fileBytes)
}
+
+func RandomPortAddress() (*net.TCPAddr, error) {
+ ln, err := net.Listen("tcp", "0.0.0.0:0")
+ if err != nil {
+ return nil, fmt.Errorf("failed to find random port for HTTP server: %v", err)
+ }
+ defer func() { _ = ln.Close() }()
+ tcpAddr, ok := ln.Addr().(*net.TCPAddr)
+ if !ok {
+ return nil, fmt.Errorf("failed to cast listener address to TCPAddr")
+ }
+ return tcpAddr, nil
+}
+
+func WaitForServer(tcpAddr *net.TCPAddr) error {
+ var conn *net.TCPConn
+ var err error
+ for i := 0; i < 10; i++ {
+ conn, err = net.DialTCP("tcp", nil, tcpAddr)
+ if err == nil {
+ _ = conn.Close()
+ break
+ }
+ time.Sleep(50 * time.Millisecond)
+ }
+ return err
+}
diff --git a/npm/kubernetes-mcp-server-darwin-amd64/package.json b/npm/kubernetes-mcp-server-darwin-amd64/package.json
index f83bf58b..49e05004 100644
--- a/npm/kubernetes-mcp-server-darwin-amd64/package.json
+++ b/npm/kubernetes-mcp-server-darwin-amd64/package.json
@@ -2,6 +2,10 @@
"name": "kubernetes-mcp-server-darwin-amd64",
"version": "0.0.0",
"description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/containers/kubernetes-mcp-server.git"
+ },
"os": [
"darwin"
],
diff --git a/npm/kubernetes-mcp-server-darwin-arm64/package.json b/npm/kubernetes-mcp-server-darwin-arm64/package.json
index d8cbc618..f8e313c2 100644
--- a/npm/kubernetes-mcp-server-darwin-arm64/package.json
+++ b/npm/kubernetes-mcp-server-darwin-arm64/package.json
@@ -2,6 +2,10 @@
"name": "kubernetes-mcp-server-darwin-arm64",
"version": "0.0.0",
"description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/containers/kubernetes-mcp-server.git"
+ },
"os": [
"darwin"
],
diff --git a/npm/kubernetes-mcp-server-linux-amd64/package.json b/npm/kubernetes-mcp-server-linux-amd64/package.json
index deaa5364..1a519074 100644
--- a/npm/kubernetes-mcp-server-linux-amd64/package.json
+++ b/npm/kubernetes-mcp-server-linux-amd64/package.json
@@ -2,6 +2,10 @@
"name": "kubernetes-mcp-server-linux-amd64",
"version": "0.0.0",
"description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/containers/kubernetes-mcp-server.git"
+ },
"os": [
"linux"
],
diff --git a/npm/kubernetes-mcp-server-linux-arm64/package.json b/npm/kubernetes-mcp-server-linux-arm64/package.json
index ba2f6475..b861abeb 100644
--- a/npm/kubernetes-mcp-server-linux-arm64/package.json
+++ b/npm/kubernetes-mcp-server-linux-arm64/package.json
@@ -2,6 +2,10 @@
"name": "kubernetes-mcp-server-linux-arm64",
"version": "0.0.0",
"description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/containers/kubernetes-mcp-server.git"
+ },
"os": [
"linux"
],
diff --git a/npm/kubernetes-mcp-server-windows-amd64/package.json b/npm/kubernetes-mcp-server-windows-amd64/package.json
index 04b5d8ef..306e5047 100644
--- a/npm/kubernetes-mcp-server-windows-amd64/package.json
+++ b/npm/kubernetes-mcp-server-windows-amd64/package.json
@@ -2,6 +2,10 @@
"name": "kubernetes-mcp-server-windows-amd64",
"version": "0.0.0",
"description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/containers/kubernetes-mcp-server.git"
+ },
"os": [
"win32"
],
diff --git a/npm/kubernetes-mcp-server-windows-arm64/package.json b/npm/kubernetes-mcp-server-windows-arm64/package.json
index 38aa06f7..c30c4a30 100644
--- a/npm/kubernetes-mcp-server-windows-arm64/package.json
+++ b/npm/kubernetes-mcp-server-windows-arm64/package.json
@@ -2,6 +2,10 @@
"name": "kubernetes-mcp-server-windows-arm64",
"version": "0.0.0",
"description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/containers/kubernetes-mcp-server.git"
+ },
"os": [
"win32"
],
diff --git a/pkg/config/config.go b/pkg/config/config.go
index 3fb2428e..81bec2b7 100644
--- a/pkg/config/config.go
+++ b/pkg/config/config.go
@@ -1,7 +1,11 @@
package config
import (
+ "bytes"
+ "context"
+ "fmt"
"os"
+ "path/filepath"
"github.com/BurntSushi/toml"
)
@@ -17,7 +21,7 @@ const (
type StaticConfig struct {
DeniedResources []GroupVersionKind `toml:"denied_resources"`
- LogLevel int `toml:"log_level,omitempty"`
+ LogLevel int `toml:"log_level,omitzero"`
Port string `toml:"port,omitempty"`
SSEBaseURL string `toml:"sse_base_url,omitempty"`
KubeConfig string `toml:"kubeconfig,omitempty"`
@@ -59,15 +63,16 @@ type StaticConfig struct {
// If set to "kubeconfig", the clusters will be loaded from those in the kubeconfig.
// If set to "in-cluster", the server will use the in cluster config
ClusterProviderStrategy string `toml:"cluster_provider_strategy,omitempty"`
- // ClusterContexts is which context should be used for each cluster
- ClusterContexts map[string]string `toml:"cluster_contexts"`
-}
-func Default() *StaticConfig {
- return &StaticConfig{
- ListOutput: "table",
- Toolsets: []string{"core", "config", "helm"},
- }
+ // ClusterProvider-specific configurations
+ // This map holds raw TOML primitives that will be parsed by registered provider parsers
+ ClusterProviderConfigs map[string]toml.Primitive `toml:"cluster_provider_configs,omitempty"`
+
+ // Internal: parsed provider configs (not exposed to TOML package)
+ parsedClusterProviderConfigs map[string]ProviderConfig
+
+ // Internal: the config.toml directory, to help resolve relative file paths
+ configDirPath string
}
type GroupVersionKind struct {
@@ -76,20 +81,85 @@ type GroupVersionKind struct {
Kind string `toml:"kind,omitempty"`
}
-// Read reads the toml file and returns the StaticConfig.
-func Read(configPath string) (*StaticConfig, error) {
+type ReadConfigOpt func(cfg *StaticConfig)
+
+func withDirPath(path string) ReadConfigOpt {
+ return func(cfg *StaticConfig) {
+ cfg.configDirPath = path
+ }
+}
+
+// Read reads the toml file and returns the StaticConfig, with any opts applied.
+func Read(configPath string, opts ...ReadConfigOpt) (*StaticConfig, error) {
configData, err := os.ReadFile(configPath)
if err != nil {
return nil, err
}
- return ReadToml(configData)
+
+ // get and save the absolute dir path to the config file, so that other config parsers can use it
+ absPath, err := filepath.Abs(configPath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to resolve absolute path to config file: %w", err)
+ }
+ dirPath := filepath.Dir(absPath)
+
+ cfg, err := ReadToml(configData, append(opts, withDirPath(dirPath))...)
+ if err != nil {
+ return nil, err
+ }
+
+ return cfg, nil
}
-// ReadToml reads the toml data and returns the StaticConfig.
-func ReadToml(configData []byte) (*StaticConfig, error) {
+// ReadToml reads the toml data and returns the StaticConfig, with any opts applied
+func ReadToml(configData []byte, opts ...ReadConfigOpt) (*StaticConfig, error) {
config := Default()
- if err := toml.Unmarshal(configData, config); err != nil {
+ md, err := toml.NewDecoder(bytes.NewReader(configData)).Decode(config)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, opt := range opts {
+ opt(config)
+ }
+
+ if err := config.parseClusterProviderConfigs(md); err != nil {
return nil, err
}
+
return config, nil
}
+
+func (c *StaticConfig) GetProviderConfig(strategy string) (ProviderConfig, bool) {
+ config, ok := c.parsedClusterProviderConfigs[strategy]
+
+ return config, ok
+}
+
+func (c *StaticConfig) parseClusterProviderConfigs(md toml.MetaData) error {
+ if c.parsedClusterProviderConfigs == nil {
+ c.parsedClusterProviderConfigs = make(map[string]ProviderConfig, len(c.ClusterProviderConfigs))
+ }
+
+ ctx := withConfigDirPath(context.Background(), c.configDirPath)
+
+ for strategy, primitive := range c.ClusterProviderConfigs {
+ parser, ok := getProviderConfigParser(strategy)
+ if !ok {
+ continue
+ }
+
+ providerConfig, err := parser(ctx, primitive, md)
+ if err != nil {
+ return fmt.Errorf("failed to parse config for ClusterProvider '%s': %w", strategy, err)
+ }
+
+ if err := providerConfig.Validate(); err != nil {
+ return fmt.Errorf("invalid config file for ClusterProvider '%s': %w", strategy, err)
+ }
+
+ c.parsedClusterProviderConfigs[strategy] = providerConfig
+ }
+
+ return nil
+}
diff --git a/pkg/config/config_default.go b/pkg/config/config_default.go
new file mode 100644
index 00000000..febea70c
--- /dev/null
+++ b/pkg/config/config_default.go
@@ -0,0 +1,43 @@
+package config
+
+import (
+ "bytes"
+
+ "github.com/BurntSushi/toml"
+)
+
+func Default() *StaticConfig {
+ defaultConfig := StaticConfig{
+ ListOutput: "table",
+ Toolsets: []string{"core", "config", "helm"},
+ }
+ overrides := defaultOverrides()
+ mergedConfig := mergeConfig(defaultConfig, overrides)
+ return &mergedConfig
+}
+
+// HasDefaultOverrides reports whether the internal defaultOverrides function
+// provides any overrides (as opposed to an empty StaticConfig).
+func HasDefaultOverrides() bool {
+ overrides := defaultOverrides()
+ var buf bytes.Buffer
+ if err := toml.NewEncoder(&buf).Encode(overrides); err != nil {
+ // If marshaling fails, assume no overrides
+ return false
+ }
+ return len(bytes.TrimSpace(buf.Bytes())) > 0
+}
+
+// mergeConfig applies non-zero values from override to base using TOML serialization
+// and returns the merged StaticConfig.
+// In case of any error during marshalling or unmarshalling, it returns the base config unchanged.
+func mergeConfig(base, override StaticConfig) StaticConfig {
+ var overrideBuffer bytes.Buffer
+ if err := toml.NewEncoder(&overrideBuffer).Encode(override); err != nil {
+ // If marshaling fails, return base unchanged
+ return base
+ }
+
+ _, _ = toml.NewDecoder(&overrideBuffer).Decode(&base)
+ return base
+}
diff --git a/pkg/config/config_default_overrides.go b/pkg/config/config_default_overrides.go
new file mode 100644
index 00000000..70d065bc
--- /dev/null
+++ b/pkg/config/config_default_overrides.go
@@ -0,0 +1,8 @@
+package config
+
+func defaultOverrides() StaticConfig {
+ return StaticConfig{
+ // IMPORTANT: this file is used to override default config values in downstream builds.
+ // This is intentionally left blank.
+ }
+}
diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go
index b498548d..afdde191 100644
--- a/pkg/config/config_test.go
+++ b/pkg/config/config_test.go
@@ -11,10 +11,25 @@ import (
"github.com/stretchr/testify/suite"
)
-type ConfigSuite struct {
+type BaseConfigSuite struct {
suite.Suite
}
+func (s *BaseConfigSuite) writeConfig(content string) string {
+ s.T().Helper()
+ tempDir := s.T().TempDir()
+ path := filepath.Join(tempDir, "config.toml")
+ err := os.WriteFile(path, []byte(content), 0644)
+ if err != nil {
+ s.T().Fatalf("Failed to write config file %s: %v", path, err)
+ }
+ return path
+}
+
+type ConfigSuite struct {
+ BaseConfigSuite
+}
+
func (s *ConfigSuite) TestReadConfigMissingFile() {
config, err := Read("non-existent-config.toml")
s.Run("returns error for missing file", func() {
@@ -159,15 +174,47 @@ func (s *ConfigSuite) TestReadConfigValidPreservesDefaultsForMissingFields() {
})
}
-func (s *ConfigSuite) writeConfig(content string) string {
- s.T().Helper()
- tempDir := s.T().TempDir()
- path := filepath.Join(tempDir, "config.toml")
- err := os.WriteFile(path, []byte(content), 0644)
- if err != nil {
- s.T().Fatalf("Failed to write config file %s: %v", path, err)
+func (s *ConfigSuite) TestMergeConfig() {
+ base := StaticConfig{
+ ListOutput: "table",
+ Toolsets: []string{"core", "config", "helm"},
+ Port: "8080",
}
- return path
+ s.Run("merges override values on top of base", func() {
+ override := StaticConfig{
+ ListOutput: "json",
+ Port: "9090",
+ }
+
+ result := mergeConfig(base, override)
+
+ s.Equal("json", result.ListOutput, "ListOutput should be overridden")
+ s.Equal("9090", result.Port, "Port should be overridden")
+ })
+
+ s.Run("preserves base values when override is empty", func() {
+ override := StaticConfig{}
+
+ result := mergeConfig(base, override)
+
+ s.Equal("table", result.ListOutput, "ListOutput should be preserved from base")
+ s.Equal([]string{"core", "config", "helm"}, result.Toolsets, "Toolsets should be preserved from base")
+ s.Equal("8080", result.Port, "Port should be preserved from base")
+ })
+
+ s.Run("handles partial overrides", func() {
+ override := StaticConfig{
+ Toolsets: []string{"custom"},
+ ReadOnly: true,
+ }
+
+ result := mergeConfig(base, override)
+
+ s.Equal("table", result.ListOutput, "ListOutput should be preserved from base")
+ s.Equal([]string{"custom"}, result.Toolsets, "Toolsets should be overridden")
+ s.Equal("8080", result.Port, "Port should be preserved from base since override doesn't specify it")
+ s.True(result.ReadOnly, "ReadOnly should be overridden to true")
+ })
}
func TestConfig(t *testing.T) {
diff --git a/pkg/config/provider_config.go b/pkg/config/provider_config.go
new file mode 100644
index 00000000..45dd2f8d
--- /dev/null
+++ b/pkg/config/provider_config.go
@@ -0,0 +1,54 @@
+package config
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/BurntSushi/toml"
+)
+
+// ProviderConfig is the interface that all provider-specific configurations must implement.
+// Each provider registers a factory function to parse its config from TOML primitives.
+type ProviderConfig interface {
+ Validate() error
+}
+
+type ProviderConfigParser func(ctx context.Context, primitive toml.Primitive, md toml.MetaData) (ProviderConfig, error)
+
+type configDirPathKey struct{}
+
+func withConfigDirPath(ctx context.Context, dirPath string) context.Context {
+ return context.WithValue(ctx, configDirPathKey{}, dirPath)
+}
+
+func ConfigDirPathFromContext(ctx context.Context) string {
+ val := ctx.Value(configDirPathKey{})
+
+ if val == nil {
+ return ""
+ }
+
+ if strVal, ok := val.(string); ok {
+ return strVal
+ }
+
+ return ""
+}
+
+var (
+ providerConfigParsers = make(map[string]ProviderConfigParser)
+)
+
+func RegisterProviderConfig(strategy string, parser ProviderConfigParser) {
+ if _, exists := providerConfigParsers[strategy]; exists {
+ panic(fmt.Sprintf("provider config parser already registered for strategy '%s'", strategy))
+ }
+
+ providerConfigParsers[strategy] = parser
+}
+
+func getProviderConfigParser(strategy string) (ProviderConfigParser, bool) {
+ provider, ok := providerConfigParsers[strategy]
+
+ return provider, ok
+}
diff --git a/pkg/config/provider_config_test.go b/pkg/config/provider_config_test.go
new file mode 100644
index 00000000..84902da4
--- /dev/null
+++ b/pkg/config/provider_config_test.go
@@ -0,0 +1,188 @@
+package config
+
+import (
+ "context"
+ "errors"
+ "path/filepath"
+ "testing"
+
+ "github.com/BurntSushi/toml"
+ "github.com/stretchr/testify/suite"
+)
+
+type ProviderConfigSuite struct {
+ BaseConfigSuite
+ originalProviderConfigParsers map[string]ProviderConfigParser
+}
+
+func (s *ProviderConfigSuite) SetupTest() {
+ s.originalProviderConfigParsers = make(map[string]ProviderConfigParser)
+ for k, v := range providerConfigParsers {
+ s.originalProviderConfigParsers[k] = v
+ }
+}
+
+func (s *ProviderConfigSuite) TearDownTest() {
+ providerConfigParsers = make(map[string]ProviderConfigParser)
+ for k, v := range s.originalProviderConfigParsers {
+ providerConfigParsers[k] = v
+ }
+}
+
+type ProviderConfigForTest struct {
+ BoolProp bool `toml:"bool_prop"`
+ StrProp string `toml:"str_prop"`
+ IntProp int `toml:"int_prop"`
+}
+
+var _ ProviderConfig = (*ProviderConfigForTest)(nil)
+
+func (p *ProviderConfigForTest) Validate() error {
+ if p.StrProp == "force-error" {
+ return errors.New("validation error forced by test")
+ }
+ return nil
+}
+
+func providerConfigForTestParser(ctx context.Context, primitive toml.Primitive, md toml.MetaData) (ProviderConfig, error) {
+ var providerConfigForTest ProviderConfigForTest
+ if err := md.PrimitiveDecode(primitive, &providerConfigForTest); err != nil {
+ return nil, err
+ }
+ return &providerConfigForTest, nil
+}
+
+func (s *ProviderConfigSuite) TestRegisterProviderConfig() {
+ s.Run("panics when registering duplicate provider config parser", func() {
+ s.Panics(func() {
+ RegisterProviderConfig("test", providerConfigForTestParser)
+ RegisterProviderConfig("test", providerConfigForTestParser)
+ }, "Expected panic when registering duplicate provider config parser")
+ })
+}
+
+func (s *ProviderConfigSuite) TestReadConfigValid() {
+ RegisterProviderConfig("test", providerConfigForTestParser)
+ validConfigPath := s.writeConfig(`
+ cluster_provider_strategy = "test"
+ [cluster_provider_configs.test]
+ bool_prop = true
+ str_prop = "a string"
+ int_prop = 42
+ `)
+
+ config, err := Read(validConfigPath)
+ s.Run("returns no error for valid file with registered provider config", func() {
+ s.Require().NoError(err, "Expected no error for valid file, got %v", err)
+ })
+ s.Run("returns config for valid file with registered provider config", func() {
+ s.Require().NotNil(config, "Expected non-nil config for valid file")
+ })
+ s.Run("parses provider config correctly", func() {
+ providerConfig, ok := config.GetProviderConfig("test")
+ s.Require().True(ok, "Expected to find provider config for strategy 'test'")
+ s.Require().NotNil(providerConfig, "Expected non-nil provider config for strategy 'test'")
+ testProviderConfig, ok := providerConfig.(*ProviderConfigForTest)
+ s.Require().True(ok, "Expected provider config to be of type *ProviderConfigForTest")
+ s.Equal(true, testProviderConfig.BoolProp, "Expected BoolProp to be true")
+ s.Equal("a string", testProviderConfig.StrProp, "Expected StrProp to be 'a string'")
+ s.Equal(42, testProviderConfig.IntProp, "Expected IntProp to be 42")
+ })
+}
+
+func (s *ProviderConfigSuite) TestReadConfigInvalidProviderConfig() {
+ RegisterProviderConfig("test", providerConfigForTestParser)
+ invalidConfigPath := s.writeConfig(`
+ cluster_provider_strategy = "test"
+ [cluster_provider_configs.test]
+ bool_prop = true
+ str_prop = "force-error"
+ int_prop = 42
+ `)
+
+ config, err := Read(invalidConfigPath)
+ s.Run("returns error for invalid provider config", func() {
+ s.Require().NotNil(err, "Expected error for invalid provider config, got nil")
+ s.ErrorContains(err, "validation error forced by test", "Expected validation error from provider config")
+ })
+ s.Run("returns nil config for invalid provider config", func() {
+ s.Nil(config, "Expected nil config for invalid provider config")
+ })
+}
+
+func (s *ProviderConfigSuite) TestReadConfigUnregisteredProviderConfig() {
+ invalidConfigPath := s.writeConfig(`
+ cluster_provider_strategy = "unregistered"
+ [cluster_provider_configs.unregistered]
+ bool_prop = true
+ str_prop = "a string"
+ int_prop = 42
+ `)
+
+ config, err := Read(invalidConfigPath)
+ s.Run("returns no error for unregistered provider config", func() {
+ s.Require().NoError(err, "Expected no error for unregistered provider config, got %v", err)
+ })
+ s.Run("returns config for unregistered provider config", func() {
+ s.Require().NotNil(config, "Expected non-nil config for unregistered provider config")
+ })
+ s.Run("does not parse unregistered provider config", func() {
+ _, ok := config.GetProviderConfig("unregistered")
+ s.Require().False(ok, "Expected no provider config for unregistered strategy")
+ })
+}
+
+func (s *ProviderConfigSuite) TestReadConfigParserError() {
+ RegisterProviderConfig("test", func(ctx context.Context, primitive toml.Primitive, md toml.MetaData) (ProviderConfig, error) {
+ return nil, errors.New("parser error forced by test")
+ })
+ invalidConfigPath := s.writeConfig(`
+ cluster_provider_strategy = "test"
+ [cluster_provider_configs.test]
+ bool_prop = true
+ str_prop = "a string"
+ int_prop = 42
+ `)
+
+ config, err := Read(invalidConfigPath)
+ s.Run("returns error for provider config parser error", func() {
+ s.Require().NotNil(err, "Expected error for provider config parser error, got nil")
+ s.ErrorContains(err, "parser error forced by test", "Expected parser error from provider config")
+ })
+ s.Run("returns nil config for provider config parser error", func() {
+ s.Nil(config, "Expected nil config for provider config parser error")
+ })
+}
+
+func (s *ProviderConfigSuite) TestConfigDirPathInContext() {
+ var capturedDirPath string
+ RegisterProviderConfig("test", func(ctx context.Context, primitive toml.Primitive, md toml.MetaData) (ProviderConfig, error) {
+ capturedDirPath = ConfigDirPathFromContext(ctx)
+ var providerConfigForTest ProviderConfigForTest
+ if err := md.PrimitiveDecode(primitive, &providerConfigForTest); err != nil {
+ return nil, err
+ }
+ return &providerConfigForTest, nil
+ })
+ configPath := s.writeConfig(`
+ cluster_provider_strategy = "test"
+ [cluster_provider_configs.test]
+ bool_prop = true
+ str_prop = "a string"
+ int_prop = 42
+ `)
+
+ absConfigPath, err := filepath.Abs(configPath)
+ s.Require().NoError(err, "test error: getting the absConfigPath should not fail")
+
+ _, err = Read(configPath)
+ s.Run("provides config directory path in context to parser", func() {
+ s.Require().NoError(err, "Expected no error reading config")
+ s.NotEmpty(capturedDirPath, "Expected non-empty directory path in context")
+ s.Equal(filepath.Dir(absConfigPath), capturedDirPath, "Expected directory path to match config file directory")
+ })
+}
+
+func TestProviderConfig(t *testing.T) {
+ suite.Run(t, new(ProviderConfigSuite))
+}
diff --git a/pkg/http/authorization.go b/pkg/http/authorization.go
index 261fdb92..19f61709 100644
--- a/pkg/http/authorization.go
+++ b/pkg/http/authorization.go
@@ -23,7 +23,7 @@ import (
type KubernetesApiTokenVerifier interface {
// KubernetesApiVerifyToken TODO: clarify proper implementation
- KubernetesApiVerifyToken(ctx context.Context, token, audience, cluster string) (*authenticationapiv1.UserInfo, []string, error)
+ KubernetesApiVerifyToken(ctx context.Context, cluster, token, audience string) (*authenticationapiv1.UserInfo, []string, error)
// GetTargetParameterName returns the parameter name used for target identification in MCP requests
GetTargetParameterName() string
}
@@ -108,7 +108,7 @@ func write401(w http.ResponseWriter, wwwAuthenticateHeader, errorType, message s
// - If ValidateToken is set, the exchanged token is then used against the Kubernetes API Server for TokenReview.
//
// see TestAuthorizationOidcTokenExchange
-func AuthorizationMiddleware(staticConfig *config.StaticConfig, oidcProvider *oidc.Provider, verifier KubernetesApiTokenVerifier) func(http.Handler) http.Handler {
+func AuthorizationMiddleware(staticConfig *config.StaticConfig, oidcProvider *oidc.Provider, verifier KubernetesApiTokenVerifier, httpClient *http.Client) func(http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == healthEndpoint || slices.Contains(WellKnownEndpoints, r.URL.EscapedPath()) {
@@ -159,7 +159,11 @@ func AuthorizationMiddleware(staticConfig *config.StaticConfig, oidcProvider *oi
if err == nil && sts.IsEnabled() {
var exchangedToken *oauth2.Token
// If the token is valid, we can exchange it for a new token with the specified audience and scopes.
- exchangedToken, err = sts.ExternalAccountTokenExchange(r.Context(), &oauth2.Token{
+ ctx := r.Context()
+ if httpClient != nil {
+ ctx = context.WithValue(ctx, oauth2.HTTPClient, httpClient)
+ }
+ exchangedToken, err = sts.ExternalAccountTokenExchange(ctx, &oauth2.Token{
AccessToken: claims.Token,
TokenType: "Bearer",
})
@@ -247,7 +251,7 @@ func (c *JWTClaims) ValidateWithProvider(ctx context.Context, audience string, p
func (c *JWTClaims) ValidateWithKubernetesApi(ctx context.Context, audience, cluster string, verifier KubernetesApiTokenVerifier) error {
if verifier != nil {
- _, _, err := verifier.KubernetesApiVerifyToken(ctx, c.Token, audience, cluster)
+ _, _, err := verifier.KubernetesApiVerifyToken(ctx, cluster, c.Token, audience)
if err != nil {
return fmt.Errorf("kubernetes API token validation error: %v", err)
}
diff --git a/pkg/http/http.go b/pkg/http/http.go
index 3f74c09f..8001462c 100644
--- a/pkg/http/http.go
+++ b/pkg/http/http.go
@@ -24,11 +24,11 @@ const (
sseMessageEndpoint = "/message"
)
-func Serve(ctx context.Context, mcpServer *mcp.Server, staticConfig *config.StaticConfig, oidcProvider *oidc.Provider) error {
+func Serve(ctx context.Context, mcpServer *mcp.Server, staticConfig *config.StaticConfig, oidcProvider *oidc.Provider, httpClient *http.Client) error {
mux := http.NewServeMux()
wrappedMux := RequestMiddleware(
- AuthorizationMiddleware(staticConfig, oidcProvider, mcpServer)(mux),
+ AuthorizationMiddleware(staticConfig, oidcProvider, mcpServer, httpClient)(mux),
)
httpServer := &http.Server{
@@ -44,7 +44,7 @@ func Serve(ctx context.Context, mcpServer *mcp.Server, staticConfig *config.Stat
mux.HandleFunc(healthEndpoint, func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
})
- mux.Handle("/.well-known/", WellKnownHandler(staticConfig))
+ mux.Handle("/.well-known/", WellKnownHandler(staticConfig, httpClient))
ctx, cancel := context.WithCancel(ctx)
defer cancel()
diff --git a/pkg/http/http_authorization_test.go b/pkg/http/http_authorization_test.go
new file mode 100644
index 00000000..a8995c45
--- /dev/null
+++ b/pkg/http/http_authorization_test.go
@@ -0,0 +1,476 @@
+package http
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "net/http"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/containers/kubernetes-mcp-server/internal/test"
+ "github.com/coreos/go-oidc/v3/oidc"
+ "github.com/coreos/go-oidc/v3/oidc/oidctest"
+ "github.com/mark3labs/mcp-go/client"
+ "github.com/mark3labs/mcp-go/client/transport"
+ "github.com/stretchr/testify/suite"
+ "k8s.io/klog/v2"
+ "k8s.io/klog/v2/textlogger"
+)
+
+type AuthorizationSuite struct {
+ BaseHttpSuite
+ mcpClient *client.Client
+ klogState klog.State
+ logBuffer bytes.Buffer
+}
+
+func (s *AuthorizationSuite) SetupTest() {
+ s.BaseHttpSuite.SetupTest()
+
+ // Capture logs
+ s.klogState = klog.CaptureState()
+ flags := flag.NewFlagSet("test", flag.ContinueOnError)
+ klog.InitFlags(flags)
+ _ = flags.Set("v", "5")
+ klog.SetLogger(textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(5), textlogger.Output(&s.logBuffer))))
+
+ // Default Auth settings (overridden in tests as needed)
+ s.OidcProvider = nil
+ s.StaticConfig.RequireOAuth = true
+ s.StaticConfig.ValidateToken = true
+ s.StaticConfig.OAuthAudience = ""
+ s.StaticConfig.StsClientId = ""
+ s.StaticConfig.StsClientSecret = ""
+ s.StaticConfig.StsAudience = ""
+ s.StaticConfig.StsScopes = []string{}
+}
+
+func (s *AuthorizationSuite) TearDownTest() {
+ s.BaseHttpSuite.TearDownTest()
+ s.klogState.Restore()
+
+ if s.mcpClient != nil {
+ _ = s.mcpClient.Close()
+ }
+}
+
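+// StartClient creates and starts a Streamable HTTP MCP client pointed at the
+// suite's server; options are typically used to inject Authorization headers.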
+func (s *AuthorizationSuite) StartClient(options ...transport.StreamableHTTPCOption) {
+ var err error
+ s.mcpClient, err = client.NewStreamableHttpClient(fmt.Sprintf("http://127.0.0.1:%d/mcp", s.TcpAddr.Port), options...)
+ s.Require().NoError(err, "Expected no error creating Streamable HTTP MCP client")
+ err = s.mcpClient.Start(s.T().Context())
+ s.Require().NoError(err, "Expected no error starting Streamable HTTP MCP client")
+}
+
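+// HttpGet issues a plain GET against the /mcp endpoint, setting the given
+// Authorization header value when non-empty, and returns the raw response.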
+func (s *AuthorizationSuite) HttpGet(authHeader string) *http.Response {
+ req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/mcp", s.TcpAddr.Port), nil)
+ s.Require().NoError(err, "Failed to create request")
+ if authHeader != "" {
+ req.Header.Set("Authorization", authHeader)
+ }
+ resp, err := http.DefaultClient.Do(req)
+ s.Require().NoError(err, "Failed to get protected endpoint")
+ return resp
+}
+
+func (s *AuthorizationSuite) TestAuthorizationUnauthorizedMissingHeader() {
+ // Missing Authorization header
+ s.StartServer()
+ s.StartClient()
+
+ s.Run("Initialize returns error for MISSING Authorization header", func() {
+ _, err := s.mcpClient.Initialize(s.T().Context(), test.McpInitRequest())
+ s.Require().Error(err, "Expected error creating initial request")
+ s.ErrorContains(err, "transport error: request failed with status 401: Unauthorized: Bearer token required")
+ })
+
+ s.Run("Protected resource with MISSING Authorization header", func() {
+ resp := s.HttpGet("")
+ s.T().Cleanup(func() { _ = resp.Body.Close() })
+
+ s.Run("returns 401 - Unauthorized status", func() {
+ s.Equal(401, resp.StatusCode, "Expected HTTP 401 for MISSING Authorization header")
+ })
+ s.Run("returns WWW-Authenticate header", func() {
+ authHeader := resp.Header.Get("WWW-Authenticate")
+ expected := `Bearer realm="Kubernetes MCP Server", error="missing_token"`
+ s.Equal(expected, authHeader, "Expected WWW-Authenticate header to match")
+ })
+ s.Run("logs error", func() {
+ s.Contains(s.logBuffer.String(), "Authentication failed - missing or invalid bearer token", "Expected log entry for missing or invalid bearer token")
+ })
+ })
+}
+
+func (s *AuthorizationSuite) TestAuthorizationUnauthorizedHeaderIncompatible() {
+ // Authorization header without Bearer prefix
+ s.StartServer()
+ s.StartClient(transport.WithHTTPHeaders(map[string]string{
+ "Authorization": "Basic YWxhZGRpbjpvcGVuc2VzYW1l",
+ }))
+
+ s.Run("Initialize returns error for INCOMPATIBLE Authorization header", func() {
+ _, err := s.mcpClient.Initialize(s.T().Context(), test.McpInitRequest())
+ s.Require().Error(err, "Expected error creating initial request")
+ s.ErrorContains(err, "transport error: request failed with status 401: Unauthorized: Bearer token required")
+ })
+
+ s.Run("Protected resource with INCOMPATIBLE Authorization header", func() {
+ resp := s.HttpGet("Basic YWxhZGRpbjpvcGVuc2VzYW1l")
+ s.T().Cleanup(func() { _ = resp.Body.Close() })
+
+ s.Run("returns 401 - Unauthorized status", func() {
+ s.Equal(401, resp.StatusCode, "Expected HTTP 401 for INCOMPATIBLE Authorization header")
+ })
+ s.Run("returns WWW-Authenticate header", func() {
+ authHeader := resp.Header.Get("WWW-Authenticate")
+ expected := `Bearer realm="Kubernetes MCP Server", error="missing_token"`
+ s.Equal(expected, authHeader, "Expected WWW-Authenticate header to match")
+ })
+ s.Run("logs error", func() {
+ s.Contains(s.logBuffer.String(), "Authentication failed - missing or invalid bearer token", "Expected log entry for missing or invalid bearer token")
+ })
+ })
+}
+
+func (s *AuthorizationSuite) TestAuthorizationUnauthorizedHeaderInvalid() {
+ // Invalid Authorization header
+ s.StartServer()
+ s.StartClient(transport.WithHTTPHeaders(map[string]string{
+ "Authorization": "Bearer " + strings.ReplaceAll(tokenBasicNotExpired, ".", ".invalid"),
+ }))
+
+ s.Run("Initialize returns error for INVALID Authorization header", func() {
+ _, err := s.mcpClient.Initialize(s.T().Context(), test.McpInitRequest())
+ s.Require().Error(err, "Expected error creating initial request")
+ s.ErrorContains(err, "transport error: request failed with status 401: Unauthorized: Invalid token")
+ })
+
+ s.Run("Protected resource with INVALID Authorization header", func() {
+ resp := s.HttpGet("Bearer " + strings.ReplaceAll(tokenBasicNotExpired, ".", ".invalid"))
+ s.T().Cleanup(func() { _ = resp.Body.Close() })
+
+ s.Run("returns 401 - Unauthorized status", func() {
+ s.Equal(401, resp.StatusCode, "Expected HTTP 401 for INVALID Authorization header")
+ })
+ s.Run("returns WWW-Authenticate header", func() {
+ authHeader := resp.Header.Get("WWW-Authenticate")
+ expected := `Bearer realm="Kubernetes MCP Server", error="invalid_token"`
+ s.Equal(expected, authHeader, "Expected WWW-Authenticate header to match")
+ })
+ s.Run("logs error", func() {
+ s.Contains(s.logBuffer.String(), "Authentication failed - JWT validation error", "Expected log entry for JWT validation error")
+ s.Contains(s.logBuffer.String(), "error: failed to parse JWT token: illegal base64 data", "Expected log entry for JWT validation error details")
+ })
+ })
+}
+
+func (s *AuthorizationSuite) TestAuthorizationUnauthorizedHeaderExpired() {
+ // Expired Authorization Bearer token
+ s.StartServer()
+ s.StartClient(transport.WithHTTPHeaders(map[string]string{
+ "Authorization": "Bearer " + tokenBasicExpired,
+ }))
+
+ s.Run("Initialize returns error for EXPIRED Authorization header", func() {
+ _, err := s.mcpClient.Initialize(s.T().Context(), test.McpInitRequest())
+ s.Require().Error(err, "Expected error creating initial request")
+ s.ErrorContains(err, "transport error: request failed with status 401: Unauthorized: Invalid token")
+ })
+
+ s.Run("Protected resource with EXPIRED Authorization header", func() {
+ resp := s.HttpGet("Bearer " + tokenBasicExpired)
+ s.T().Cleanup(func() { _ = resp.Body.Close() })
+
+ s.Run("returns 401 - Unauthorized status", func() {
+ s.Equal(401, resp.StatusCode, "Expected HTTP 401 for EXPIRED Authorization header")
+ })
+ s.Run("returns WWW-Authenticate header", func() {
+ authHeader := resp.Header.Get("WWW-Authenticate")
+ expected := `Bearer realm="Kubernetes MCP Server", error="invalid_token"`
+ s.Equal(expected, authHeader, "Expected WWW-Authenticate header to match")
+ })
+ s.Run("logs error", func() {
+ s.Contains(s.logBuffer.String(), "Authentication failed - JWT validation error", "Expected log entry for JWT validation error")
+ s.Contains(s.logBuffer.String(), "validation failed, token is expired (exp)", "Expected log entry for JWT validation error details")
+ })
+ })
+}
+
+func (s *AuthorizationSuite) TestAuthorizationUnauthorizedHeaderInvalidAudience() {
+ // Invalid audience claim Bearer token
+ s.StaticConfig.OAuthAudience = "expected-audience"
+ s.StartServer()
+ s.StartClient(transport.WithHTTPHeaders(map[string]string{
+ "Authorization": "Bearer " + tokenBasicNotExpired,
+ }))
+
+ s.Run("Initialize returns error for INVALID AUDIENCE Authorization header", func() {
+ _, err := s.mcpClient.Initialize(s.T().Context(), test.McpInitRequest())
+ s.Require().Error(err, "Expected error creating initial request")
+ s.ErrorContains(err, "transport error: request failed with status 401: Unauthorized: Invalid token")
+ })
+
+ s.Run("Protected resource with INVALID AUDIENCE Authorization header", func() {
+ resp := s.HttpGet("Bearer " + tokenBasicNotExpired)
+ s.T().Cleanup(func() { _ = resp.Body.Close() })
+
+ s.Run("returns 401 - Unauthorized status", func() {
+ s.Equal(401, resp.StatusCode, "Expected HTTP 401 for INVALID AUDIENCE Authorization header")
+ })
+ s.Run("returns WWW-Authenticate header", func() {
+ authHeader := resp.Header.Get("WWW-Authenticate")
+ expected := `Bearer realm="Kubernetes MCP Server", audience="expected-audience", error="invalid_token"`
+ s.Equal(expected, authHeader, "Expected WWW-Authenticate header to match")
+ })
+ s.Run("logs error", func() {
+ s.Contains(s.logBuffer.String(), "Authentication failed - JWT validation error", "Expected log entry for JWT validation error")
+ s.Contains(s.logBuffer.String(), "invalid audience claim (aud)", "Expected log entry for JWT validation error details")
+ })
+ })
+}
+
+func (s *AuthorizationSuite) TestAuthorizationUnauthorizedOidcValidation() {
+ // Failed OIDC validation
+ s.StaticConfig.OAuthAudience = "mcp-server"
+ oidcTestServer := NewOidcTestServer(s.T())
+ s.T().Cleanup(oidcTestServer.Close)
+ s.OidcProvider = oidcTestServer.Provider
+ s.StartServer()
+ s.StartClient(transport.WithHTTPHeaders(map[string]string{
+ "Authorization": "Bearer " + tokenBasicNotExpired,
+ }))
+
+ s.Run("Initialize returns error for INVALID OIDC Authorization header", func() {
+ _, err := s.mcpClient.Initialize(s.T().Context(), test.McpInitRequest())
+ s.Require().Error(err, "Expected error creating initial request")
+ s.ErrorContains(err, "transport error: request failed with status 401: Unauthorized: Invalid token")
+ })
+
+ s.Run("Protected resource with INVALID OIDC Authorization header", func() {
+ resp := s.HttpGet("Bearer " + tokenBasicNotExpired)
+ s.T().Cleanup(func() { _ = resp.Body.Close() })
+
+ s.Run("returns 401 - Unauthorized status", func() {
+ s.Equal(401, resp.StatusCode, "Expected HTTP 401 for INVALID OIDC Authorization header")
+ })
+ s.Run("returns WWW-Authenticate header", func() {
+ authHeader := resp.Header.Get("WWW-Authenticate")
+ expected := `Bearer realm="Kubernetes MCP Server", audience="mcp-server", error="invalid_token"`
+ s.Equal(expected, authHeader, "Expected WWW-Authenticate header to match")
+ })
+ s.Run("logs error", func() {
+ s.Contains(s.logBuffer.String(), "Authentication failed - JWT validation error", "Expected log entry for JWT validation error")
+ s.Contains(s.logBuffer.String(), "OIDC token validation error: failed to verify signature", "Expected log entry for OIDC validation error details")
+ })
+ })
+}
+
+func (s *AuthorizationSuite) TestAuthorizationUnauthorizedKubernetesValidation() {
+ // Failed Kubernetes TokenReview
+ s.StaticConfig.OAuthAudience = "mcp-server"
+ oidcTestServer := NewOidcTestServer(s.T())
+ s.T().Cleanup(oidcTestServer.Close)
+ rawClaims := `{
+ "iss": "` + oidcTestServer.URL + `",
+ "exp": ` + strconv.FormatInt(time.Now().Add(time.Hour).Unix(), 10) + `,
+ "aud": "mcp-server"
+ }`
+ validOidcToken := oidctest.SignIDToken(oidcTestServer.PrivateKey, "test-oidc-key-id", oidc.RS256, rawClaims)
+ s.OidcProvider = oidcTestServer.Provider
+ s.StartServer()
+ s.StartClient(transport.WithHTTPHeaders(map[string]string{
+ "Authorization": "Bearer " + validOidcToken,
+ }))
+
+ s.Run("Initialize returns error for INVALID KUBERNETES Authorization header", func() {
+ _, err := s.mcpClient.Initialize(s.T().Context(), test.McpInitRequest())
+ s.Require().Error(err, "Expected error creating initial request")
+ s.ErrorContains(err, "transport error: request failed with status 401: Unauthorized: Invalid token")
+ })
+
+ s.Run("Protected resource with INVALID KUBERNETES Authorization header", func() {
+ resp := s.HttpGet("Bearer " + validOidcToken)
+ s.T().Cleanup(func() { _ = resp.Body.Close() })
+
+ s.Run("returns 401 - Unauthorized status", func() {
+ s.Equal(401, resp.StatusCode, "Expected HTTP 401 for INVALID KUBERNETES Authorization header")
+ })
+ s.Run("returns WWW-Authenticate header", func() {
+ authHeader := resp.Header.Get("WWW-Authenticate")
+ expected := `Bearer realm="Kubernetes MCP Server", audience="mcp-server", error="invalid_token"`
+ s.Equal(expected, authHeader, "Expected WWW-Authenticate header to match")
+ })
+ s.Run("logs error", func() {
+ s.Contains(s.logBuffer.String(), "Authentication failed - JWT validation error", "Expected log entry for JWT validation error")
+ s.Contains(s.logBuffer.String(), "kubernetes API token validation error: failed to create token review", "Expected log entry for Kubernetes TokenReview error details")
+ })
+ })
+}
+
+func (s *AuthorizationSuite) TestAuthorizationRequireOAuthFalse() {
+ s.StaticConfig.RequireOAuth = false
+ s.StartServer()
+ s.StartClient()
+
+ s.Run("Initialize returns OK for MISSING Authorization header", func() {
+ result, err := s.mcpClient.Initialize(s.T().Context(), test.McpInitRequest())
+ s.Require().NoError(err, "Expected no error creating initial request")
+ s.Require().NotNil(result, "Expected initial request to not be nil")
+ })
+}
+
+func (s *AuthorizationSuite) TestAuthorizationRawToken() {
+ tokenReviewHandler := &test.TokenReviewHandler{}
+ s.MockServer.Handle(tokenReviewHandler)
+
+ cases := []struct {
+ audience string
+ validateToken bool
+ }{
+ {"", false}, // No audience, no validation
+ {"", true}, // No audience, validation enabled
+ {"mcp-server", false}, // Audience set, no validation
+ {"mcp-server", true}, // Audience set, validation enabled
+ }
+ for _, c := range cases {
+ s.StaticConfig.OAuthAudience = c.audience
+ s.StaticConfig.ValidateToken = c.validateToken
+ s.StartServer()
+ s.StartClient(transport.WithHTTPHeaders(map[string]string{
+ "Authorization": "Bearer " + tokenBasicNotExpired,
+ }))
+ tokenReviewHandler.TokenReviewed = false
+
+ s.Run(fmt.Sprintf("Protected resource with audience = '%s' and validate-token = '%t'", c.audience, c.validateToken), func() {
+ s.Run("Initialize returns OK for VALID Authorization header", func() {
+ result, err := s.mcpClient.Initialize(s.T().Context(), test.McpInitRequest())
+ s.Require().NoError(err, "Expected no error creating initial request")
+ s.Require().NotNil(result, "Expected initial request to not be nil")
+ })
+
+ s.Run("Performs token validation accordingly", func() {
+ if tokenReviewHandler.TokenReviewed && !c.validateToken {
+ s.Fail("Expected token review to be skipped when validate-token is false, but it was performed")
+ }
+ if !tokenReviewHandler.TokenReviewed && c.validateToken {
+ s.Fail("Expected token review to be performed when validate-token is true, but it was skipped")
+ }
+ })
+ })
+ _ = s.mcpClient.Close()
+ s.StopServer()
+ }
+}
+
+func (s *AuthorizationSuite) TestAuthorizationOidcToken() {
+ tokenReviewHandler := &test.TokenReviewHandler{}
+ s.MockServer.Handle(tokenReviewHandler)
+
+ oidcTestServer := NewOidcTestServer(s.T())
+ s.T().Cleanup(oidcTestServer.Close)
+ rawClaims := `{
+ "iss": "` + oidcTestServer.URL + `",
+ "exp": ` + strconv.FormatInt(time.Now().Add(time.Hour).Unix(), 10) + `,
+ "aud": "mcp-server"
+ }`
+ validOidcToken := oidctest.SignIDToken(oidcTestServer.PrivateKey, "test-oidc-key-id", oidc.RS256, rawClaims)
+
+ cases := []bool{false, true}
+ for _, validateToken := range cases {
+ s.OidcProvider = oidcTestServer.Provider
+ s.StaticConfig.OAuthAudience = "mcp-server"
+ s.StaticConfig.ValidateToken = validateToken
+ s.StartServer()
+ s.StartClient(transport.WithHTTPHeaders(map[string]string{
+ "Authorization": "Bearer " + validOidcToken,
+ }))
+ tokenReviewHandler.TokenReviewed = false
+
+ s.Run(fmt.Sprintf("Protected resource with validate-token = '%t'", validateToken), func() {
+ s.Run("Initialize returns OK for VALID OIDC Authorization header", func() {
+ result, err := s.mcpClient.Initialize(s.T().Context(), test.McpInitRequest())
+ s.Require().NoError(err, "Expected no error creating initial request")
+ s.Require().NotNil(result, "Expected initial request to not be nil")
+ })
+
+ s.Run("Performs token validation accordingly for VALID OIDC Authorization header", func() {
+ if tokenReviewHandler.TokenReviewed && !validateToken {
+ s.Fail("Expected token review to be skipped when validate-token is false, but it was performed")
+ }
+ if !tokenReviewHandler.TokenReviewed && validateToken {
+ s.Fail("Expected token review to be performed when validate-token is true, but it was skipped")
+ }
+ })
+ })
+ _ = s.mcpClient.Close()
+ s.StopServer()
+ }
+}
+
+func (s *AuthorizationSuite) TestAuthorizationOidcTokenExchange() {
+ tokenReviewHandler := &test.TokenReviewHandler{}
+ s.MockServer.Handle(tokenReviewHandler)
+
+ oidcTestServer := NewOidcTestServer(s.T())
+ s.T().Cleanup(oidcTestServer.Close)
+ rawClaims := `{
+ "iss": "` + oidcTestServer.URL + `",
+ "exp": ` + strconv.FormatInt(time.Now().Add(time.Hour).Unix(), 10) + `,
+ "aud": "%s"
+ }`
+ validOidcClientToken := oidctest.SignIDToken(oidcTestServer.PrivateKey, "test-oidc-key-id", oidc.RS256,
+ fmt.Sprintf(rawClaims, "mcp-server"))
+ validOidcBackendToken := oidctest.SignIDToken(oidcTestServer.PrivateKey, "test-oidc-key-id", oidc.RS256,
+ fmt.Sprintf(rawClaims, "backend-audience"))
+ oidcTestServer.TokenEndpointHandler = func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ _, _ = fmt.Fprintf(w, `{"access_token":"%s","token_type":"Bearer","expires_in":253402297199}`, validOidcBackendToken)
+ }
+
+ cases := []bool{false, true}
+ for _, validateToken := range cases {
+ s.OidcProvider = oidcTestServer.Provider
+ s.StaticConfig.OAuthAudience = "mcp-server"
+ s.StaticConfig.ValidateToken = validateToken
+ s.StaticConfig.StsClientId = "test-sts-client-id"
+ s.StaticConfig.StsClientSecret = "test-sts-client-secret"
+ s.StaticConfig.StsAudience = "backend-audience"
+ s.StaticConfig.StsScopes = []string{"backend-scope"}
+ s.StartServer()
+ s.StartClient(transport.WithHTTPHeaders(map[string]string{
+ "Authorization": "Bearer " + validOidcClientToken,
+ }))
+ tokenReviewHandler.TokenReviewed = false
+
+ s.Run(fmt.Sprintf("Protected resource with validate-token='%t'", validateToken), func() {
+ s.Run("Initialize returns OK for VALID OIDC EXCHANGE Authorization header", func() {
+ result, err := s.mcpClient.Initialize(s.T().Context(), test.McpInitRequest())
+ s.Require().NoError(err, "Expected no error creating initial request")
+ s.Require().NotNil(result, "Expected initial request to not be nil")
+ })
+
+ s.Run("Performs token validation accordingly for VALID OIDC EXCHANGE Authorization header", func() {
+ if tokenReviewHandler.TokenReviewed && !validateToken {
+ s.Fail("Expected token review to be skipped when validate-token is false, but it was performed")
+ }
+ if !tokenReviewHandler.TokenReviewed && validateToken {
+ s.Fail("Expected token review to be performed when validate-token is true, but it was skipped")
+ }
+ })
+ })
+ _ = s.mcpClient.Close()
+ s.StopServer()
+ }
+}
+
+func TestAuthorization(t *testing.T) {
+ suite.Run(t, new(AuthorizationSuite))
+}
diff --git a/pkg/http/http_mcp_test.go b/pkg/http/http_mcp_test.go
new file mode 100644
index 00000000..2a79b4be
--- /dev/null
+++ b/pkg/http/http_mcp_test.go
@@ -0,0 +1,69 @@
+package http
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/containers/kubernetes-mcp-server/internal/test"
+ "github.com/mark3labs/mcp-go/client"
+ "github.com/mark3labs/mcp-go/client/transport"
+ "github.com/mark3labs/mcp-go/mcp"
+ "github.com/stretchr/testify/suite"
+)
+
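+// McpTransportSuite exercises both supported MCP transports (SSE and
+// Streamable HTTP) against a server started without authentication.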
+type McpTransportSuite struct {
+ BaseHttpSuite
+}
+
+func (s *McpTransportSuite) SetupTest() {
+ s.BaseHttpSuite.SetupTest()
+ s.StartServer()
+}
+
+func (s *McpTransportSuite) TearDownTest() {
+ s.BaseHttpSuite.TearDownTest()
+}
+
+func (s *McpTransportSuite) TestSseTransport() {
+ sseClient, sseClientErr := client.NewSSEMCPClient(fmt.Sprintf("http://127.0.0.1:%d/sse", s.TcpAddr.Port))
+ s.Require().NoError(sseClientErr, "Expected no error creating SSE MCP client")
+ startErr := sseClient.Start(s.T().Context())
+ s.Require().NoError(startErr, "Expected no error starting SSE MCP client")
+ s.Run("Can Initialize Session", func() {
+ _, err := sseClient.Initialize(s.T().Context(), test.McpInitRequest())
+ s.Require().NoError(err, "Expected no error initializing SSE MCP client")
+ })
+ s.Run("Can List Tools", func() {
+ tools, err := sseClient.ListTools(s.T().Context(), mcp.ListToolsRequest{})
+ s.Require().NoError(err, "Expected no error listing tools from SSE MCP client")
+ s.Greater(len(tools.Tools), 0, "Expected at least one tool from SSE MCP client")
+ })
+ s.Run("Can close SSE client", func() {
+ s.Require().NoError(sseClient.Close(), "Expected no error closing SSE MCP client")
+ })
+}
+
+func (s *McpTransportSuite) TestStreamableHttpTransport() {
+ httpClient, httpClientErr := client.NewStreamableHttpClient(fmt.Sprintf("http://127.0.0.1:%d/mcp", s.TcpAddr.Port), transport.WithContinuousListening())
+ s.Require().NoError(httpClientErr, "Expected no error creating Streamable HTTP MCP client")
+ startErr := httpClient.Start(s.T().Context())
+ s.Require().NoError(startErr, "Expected no error starting Streamable HTTP MCP client")
+ s.Run("Can Initialize Session", func() {
+ _, err := httpClient.Initialize(s.T().Context(), test.McpInitRequest())
+ s.Require().NoError(err, "Expected no error initializing Streamable HTTP MCP client")
+ })
+ s.Run("Can List Tools", func() {
+ tools, err := httpClient.ListTools(s.T().Context(), mcp.ListToolsRequest{})
+ s.Require().NoError(err, "Expected no error listing tools from Streamable HTTP MCP client")
+ s.Greater(len(tools.Tools), 0, "Expected at least one tool from Streamable HTTP MCP client")
+ })
+ s.Run("Can close Streamable HTTP client", func() {
+ s.Require().NoError(httpClient.Close(), "Expected no error closing Streamable HTTP MCP client")
+ })
+}
+
+func TestMcpTransport(t *testing.T) {
+ suite.Run(t, new(McpTransportSuite))
+}
diff --git a/pkg/http/http_test.go b/pkg/http/http_test.go
index 36e7f883..64c3355e 100644
--- a/pkg/http/http_test.go
+++ b/pkg/http/http_test.go
@@ -1,7 +1,6 @@
package http
import (
- "bufio"
"bytes"
"context"
"crypto/rand"
@@ -22,6 +21,7 @@ import (
"github.com/containers/kubernetes-mcp-server/internal/test"
"github.com/coreos/go-oidc/v3/oidc"
"github.com/coreos/go-oidc/v3/oidc/oidctest"
+ "github.com/stretchr/testify/suite"
"golang.org/x/sync/errgroup"
"k8s.io/klog/v2"
"k8s.io/klog/v2/textlogger"
@@ -30,6 +30,53 @@ import (
"github.com/containers/kubernetes-mcp-server/pkg/mcp"
)
+type BaseHttpSuite struct {
+ suite.Suite
+ MockServer *test.MockServer
+ TcpAddr *net.TCPAddr
+ StaticConfig *config.StaticConfig
+ mcpServer *mcp.Server
+ OidcProvider *oidc.Provider
+ timeoutCancel context.CancelFunc
+ StopServer context.CancelFunc
+ WaitForShutdown func() error
+}
+
+func (s *BaseHttpSuite) SetupTest() {
+ var err error
+ http.DefaultClient.Timeout = 10 * time.Second
+ s.MockServer = test.NewMockServer()
+ s.TcpAddr, err = test.RandomPortAddress()
+ s.Require().NoError(err, "Expected no error getting random port address")
+ s.StaticConfig = config.Default()
+ s.StaticConfig.KubeConfig = s.MockServer.KubeconfigFile(s.T())
+ s.StaticConfig.Port = strconv.Itoa(s.TcpAddr.Port)
+}
+
+func (s *BaseHttpSuite) StartServer() {
+ var err error
+ s.mcpServer, err = mcp.NewServer(mcp.Configuration{StaticConfig: s.StaticConfig})
+ s.Require().NoError(err, "Expected no error creating MCP server")
+ s.Require().NotNil(s.mcpServer, "MCP server should not be nil")
+ var timeoutCtx, cancelCtx context.Context
+ timeoutCtx, s.timeoutCancel = context.WithTimeout(s.T().Context(), 10*time.Second)
+ group, gc := errgroup.WithContext(timeoutCtx)
+ cancelCtx, s.StopServer = context.WithCancel(gc)
+ group.Go(func() error { return Serve(cancelCtx, s.mcpServer, s.StaticConfig, s.OidcProvider, nil) })
+ s.WaitForShutdown = group.Wait
+ s.Require().NoError(test.WaitForServer(s.TcpAddr), "HTTP server did not start in time")
+}
+
+func (s *BaseHttpSuite) TearDownTest() {
+ s.MockServer.Close()
+ if s.mcpServer != nil {
+ s.mcpServer.Close()
+ }
+ s.StopServer()
+ s.Require().NoError(s.WaitForShutdown(), "HTTP server did not shut down gracefully")
+ s.timeoutCancel()
+}
+
type httpContext struct {
klogState klog.State
mockServer *test.MockServer
@@ -42,20 +89,6 @@ type httpContext struct {
OidcProvider *oidc.Provider
}
-const tokenReviewSuccessful = `
- {
- "kind": "TokenReview",
- "apiVersion": "authentication.k8s.io/v1",
- "spec": {"token": "valid-token"},
- "status": {
- "authenticated": true,
- "user": {
- "username": "test-user",
- "groups": ["system:authenticated"]
- }
- }
- }`
-
func (c *httpContext) beforeEach(t *testing.T) {
t.Helper()
http.DefaultClient.Timeout = 10 * time.Second
@@ -89,7 +122,7 @@ func (c *httpContext) beforeEach(t *testing.T) {
timeoutCtx, c.timeoutCancel = context.WithTimeout(t.Context(), 10*time.Second)
group, gc := errgroup.WithContext(timeoutCtx)
cancelCtx, c.StopServer = context.WithCancel(gc)
- group.Go(func() error { return Serve(cancelCtx, mcpServer, c.StaticConfig, c.OidcProvider) })
+ group.Go(func() error { return Serve(cancelCtx, mcpServer, c.StaticConfig, c.OidcProvider, nil) })
c.WaitForShutdown = group.Wait
// Wait for HTTP server to start (using net)
for i := 0; i < 10; i++ {
@@ -192,92 +225,6 @@ func TestGracefulShutdown(t *testing.T) {
})
}
-func TestSseTransport(t *testing.T) {
- testCase(t, func(ctx *httpContext) {
- sseResp, sseErr := http.Get(fmt.Sprintf("http://%s/sse", ctx.HttpAddress))
- t.Cleanup(func() { _ = sseResp.Body.Close() })
- t.Run("Exposes SSE endpoint at /sse", func(t *testing.T) {
- if sseErr != nil {
- t.Fatalf("Failed to get SSE endpoint: %v", sseErr)
- }
- if sseResp.StatusCode != http.StatusOK {
- t.Errorf("Expected HTTP 200 OK, got %d", sseResp.StatusCode)
- }
- })
- t.Run("SSE endpoint returns text/event-stream content type", func(t *testing.T) {
- if sseResp.Header.Get("Content-Type") != "text/event-stream" {
- t.Errorf("Expected Content-Type text/event-stream, got %s", sseResp.Header.Get("Content-Type"))
- }
- })
- responseReader := bufio.NewReader(sseResp.Body)
- event, eventErr := responseReader.ReadString('\n')
- endpoint, endpointErr := responseReader.ReadString('\n')
- t.Run("SSE endpoint returns stream with messages endpoint", func(t *testing.T) {
- if eventErr != nil {
- t.Fatalf("Failed to read SSE response body (event): %v", eventErr)
- }
- if event != "event: endpoint\n" {
- t.Errorf("Expected SSE event 'endpoint', got %s", event)
- }
- if endpointErr != nil {
- t.Fatalf("Failed to read SSE response body (endpoint): %v", endpointErr)
- }
- if !strings.HasPrefix(endpoint, "data: /message?sessionId=") {
- t.Errorf("Expected SSE data: '/message', got %s", endpoint)
- }
- })
- messageResp, messageErr := http.Post(
- fmt.Sprintf("http://%s/message?sessionId=%s", ctx.HttpAddress, strings.TrimSpace(endpoint[25:])),
- "application/json",
- bytes.NewBufferString("{}"),
- )
- t.Cleanup(func() { _ = messageResp.Body.Close() })
- t.Run("Exposes message endpoint at /message", func(t *testing.T) {
- if messageErr != nil {
- t.Fatalf("Failed to get message endpoint: %v", messageErr)
- }
- if messageResp.StatusCode != http.StatusAccepted {
- t.Errorf("Expected HTTP 202 OK, got %d", messageResp.StatusCode)
- }
- })
- })
-}
-
-func TestStreamableHttpTransport(t *testing.T) {
- testCase(t, func(ctx *httpContext) {
- mcpGetResp, mcpGetErr := http.Get(fmt.Sprintf("http://%s/mcp", ctx.HttpAddress))
- t.Cleanup(func() { _ = mcpGetResp.Body.Close() })
- t.Run("Exposes MCP GET endpoint at /mcp", func(t *testing.T) {
- if mcpGetErr != nil {
- t.Fatalf("Failed to get MCP endpoint: %v", mcpGetErr)
- }
- if mcpGetResp.StatusCode != http.StatusOK {
- t.Errorf("Expected HTTP 200 OK, got %d", mcpGetResp.StatusCode)
- }
- })
- t.Run("MCP GET endpoint returns text/event-stream content type", func(t *testing.T) {
- if mcpGetResp.Header.Get("Content-Type") != "text/event-stream" {
- t.Errorf("Expected Content-Type text/event-stream (GET), got %s", mcpGetResp.Header.Get("Content-Type"))
- }
- })
- mcpPostResp, mcpPostErr := http.Post(fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), "application/json", bytes.NewBufferString("{}"))
- t.Cleanup(func() { _ = mcpPostResp.Body.Close() })
- t.Run("Exposes MCP POST endpoint at /mcp", func(t *testing.T) {
- if mcpPostErr != nil {
- t.Fatalf("Failed to post to MCP endpoint: %v", mcpPostErr)
- }
- if mcpPostResp.StatusCode != http.StatusOK {
- t.Errorf("Expected HTTP 200 OK, got %d", mcpPostResp.StatusCode)
- }
- })
- t.Run("MCP POST endpoint returns application/json content type", func(t *testing.T) {
- if mcpPostResp.Header.Get("Content-Type") != "application/json" {
- t.Errorf("Expected Content-Type application/json (POST), got %s", mcpPostResp.Header.Get("Content-Type"))
- }
- })
- })
-}
-
func TestHealthCheck(t *testing.T) {
testCase(t, func(ctx *httpContext) {
t.Run("Exposes health check endpoint at /healthz", func(t *testing.T) {
@@ -390,6 +337,122 @@ func TestWellKnownReverseProxy(t *testing.T) {
})
}
+func TestWellKnownHeaderPropagation(t *testing.T) {
+ cases := []string{
+ ".well-known/oauth-authorization-server",
+ ".well-known/oauth-protected-resource",
+ ".well-known/openid-configuration",
+ }
+ var receivedRequestHeaders http.Header
+ testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if !strings.HasPrefix(r.URL.EscapedPath(), "/.well-known/") {
+ http.NotFound(w, r)
+ return
+ }
+ // Capture headers received from the proxy
+ receivedRequestHeaders = r.Header.Clone()
+ // Set response headers that should be propagated back
+ w.Header().Set("Content-Type", "application/json")
+ w.Header().Set("Access-Control-Allow-Origin", "https://example.com")
+ w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS")
+ w.Header().Set("Cache-Control", "no-cache")
+ w.Header().Set("X-Custom-Backend-Header", "backend-value")
+ _, _ = w.Write([]byte(`{"issuer": "https://example.com"}`))
+ }))
+ t.Cleanup(testServer.Close)
+ staticConfig := &config.StaticConfig{
+ AuthorizationURL: testServer.URL,
+ RequireOAuth: true,
+ ValidateToken: true,
+ ClusterProviderStrategy: config.ClusterProviderKubeConfig,
+ }
+ testCaseWithContext(t, &httpContext{StaticConfig: staticConfig}, func(ctx *httpContext) {
+ for _, path := range cases {
+ receivedRequestHeaders = nil
+ req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/%s", ctx.HttpAddress, path), nil)
+ if err != nil {
+ t.Fatalf("Failed to create request: %v", err)
+ }
+ // Add various headers to test propagation
+ req.Header.Set("Origin", "https://example.com")
+ req.Header.Set("User-Agent", "Test-Agent/1.0")
+ req.Header.Set("Accept", "application/json")
+ req.Header.Set("Accept-Language", "en-US")
+ req.Header.Set("X-Custom-Header", "custom-value")
+ req.Header.Set("Referer", "https://example.com/page")
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ t.Fatalf("Failed to get %s endpoint: %v", path, err)
+ }
+ t.Cleanup(func() { _ = resp.Body.Close() })
+
+ t.Run("Well-known proxy propagates Origin header to backend for "+path, func(t *testing.T) {
+ if receivedRequestHeaders == nil {
+ t.Fatal("Backend did not receive any headers")
+ }
+ if receivedRequestHeaders.Get("Origin") != "https://example.com" {
+ t.Errorf("Expected Origin header 'https://example.com', got '%s'", receivedRequestHeaders.Get("Origin"))
+ }
+ })
+
+ t.Run("Well-known proxy propagates User-Agent header to backend for "+path, func(t *testing.T) {
+ if receivedRequestHeaders.Get("User-Agent") != "Test-Agent/1.0" {
+ t.Errorf("Expected User-Agent header 'Test-Agent/1.0', got '%s'", receivedRequestHeaders.Get("User-Agent"))
+ }
+ })
+
+ t.Run("Well-known proxy propagates Accept header to backend for "+path, func(t *testing.T) {
+ if receivedRequestHeaders.Get("Accept") != "application/json" {
+ t.Errorf("Expected Accept header 'application/json', got '%s'", receivedRequestHeaders.Get("Accept"))
+ }
+ })
+
+ t.Run("Well-known proxy propagates Accept-Language header to backend for "+path, func(t *testing.T) {
+ if receivedRequestHeaders.Get("Accept-Language") != "en-US" {
+ t.Errorf("Expected Accept-Language header 'en-US', got '%s'", receivedRequestHeaders.Get("Accept-Language"))
+ }
+ })
+
+ t.Run("Well-known proxy propagates custom headers to backend for "+path, func(t *testing.T) {
+ if receivedRequestHeaders.Get("X-Custom-Header") != "custom-value" {
+ t.Errorf("Expected X-Custom-Header 'custom-value', got '%s'", receivedRequestHeaders.Get("X-Custom-Header"))
+ }
+ })
+
+ t.Run("Well-known proxy propagates Referer header to backend for "+path, func(t *testing.T) {
+ if receivedRequestHeaders.Get("Referer") != "https://example.com/page" {
+ t.Errorf("Expected Referer header 'https://example.com/page', got '%s'", receivedRequestHeaders.Get("Referer"))
+ }
+ })
+
+ t.Run("Well-known proxy returns Access-Control-Allow-Origin from backend for "+path, func(t *testing.T) {
+ if resp.Header.Get("Access-Control-Allow-Origin") != "https://example.com" {
+ t.Errorf("Expected Access-Control-Allow-Origin header 'https://example.com', got '%s'", resp.Header.Get("Access-Control-Allow-Origin"))
+ }
+ })
+
+ t.Run("Well-known proxy returns Access-Control-Allow-Methods from backend for "+path, func(t *testing.T) {
+ if resp.Header.Get("Access-Control-Allow-Methods") != "GET, POST, OPTIONS" {
+ t.Errorf("Expected Access-Control-Allow-Methods header 'GET, POST, OPTIONS', got '%s'", resp.Header.Get("Access-Control-Allow-Methods"))
+ }
+ })
+
+ t.Run("Well-known proxy returns Cache-Control from backend for "+path, func(t *testing.T) {
+ if resp.Header.Get("Cache-Control") != "no-cache" {
+ t.Errorf("Expected Cache-Control header 'no-cache', got '%s'", resp.Header.Get("Cache-Control"))
+ }
+ })
+
+ t.Run("Well-known proxy returns custom response headers from backend for "+path, func(t *testing.T) {
+ if resp.Header.Get("X-Custom-Backend-Header") != "backend-value" {
+ t.Errorf("Expected X-Custom-Backend-Header 'backend-value', got '%s'", resp.Header.Get("X-Custom-Backend-Header"))
+ }
+ })
+ }
+ })
+}
+
func TestWellKnownOverrides(t *testing.T) {
cases := []string{
".well-known/oauth-authorization-server",
@@ -500,396 +563,3 @@ func TestMiddlewareLogging(t *testing.T) {
})
})
}
-
-func TestAuthorizationUnauthorized(t *testing.T) {
- // Missing Authorization header
- testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) {
- resp, err := http.Get(fmt.Sprintf("http://%s/mcp", ctx.HttpAddress))
- if err != nil {
- t.Fatalf("Failed to get protected endpoint: %v", err)
- }
- t.Cleanup(func() { _ = resp.Body.Close })
- t.Run("Protected resource with MISSING Authorization header returns 401 - Unauthorized", func(t *testing.T) {
- if resp.StatusCode != 401 {
- t.Errorf("Expected HTTP 401, got %d", resp.StatusCode)
- }
- })
- t.Run("Protected resource with MISSING Authorization header returns WWW-Authenticate header", func(t *testing.T) {
- authHeader := resp.Header.Get("WWW-Authenticate")
- expected := `Bearer realm="Kubernetes MCP Server", error="missing_token"`
- if authHeader != expected {
- t.Errorf("Expected WWW-Authenticate header to be %q, got %q", expected, authHeader)
- }
- })
- t.Run("Protected resource with MISSING Authorization header logs error", func(t *testing.T) {
- if !strings.Contains(ctx.LogBuffer.String(), "Authentication failed - missing or invalid bearer token") {
- t.Errorf("Expected log entry for missing or invalid bearer token, got: %s", ctx.LogBuffer.String())
- }
- })
- })
- // Authorization header without Bearer prefix
- testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) {
- req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil)
- if err != nil {
- t.Fatalf("Failed to create request: %v", err)
- }
- req.Header.Set("Authorization", "Basic YWxhZGRpbjpvcGVuc2VzYW1l")
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- t.Fatalf("Failed to get protected endpoint: %v", err)
- }
- t.Cleanup(func() { _ = resp.Body.Close })
- t.Run("Protected resource with INCOMPATIBLE Authorization header returns WWW-Authenticate header", func(t *testing.T) {
- authHeader := resp.Header.Get("WWW-Authenticate")
- expected := `Bearer realm="Kubernetes MCP Server", error="missing_token"`
- if authHeader != expected {
- t.Errorf("Expected WWW-Authenticate header to be %q, got %q", expected, authHeader)
- }
- })
- t.Run("Protected resource with INCOMPATIBLE Authorization header logs error", func(t *testing.T) {
- if !strings.Contains(ctx.LogBuffer.String(), "Authentication failed - missing or invalid bearer token") {
- t.Errorf("Expected log entry for missing or invalid bearer token, got: %s", ctx.LogBuffer.String())
- }
- })
- })
- // Invalid Authorization header
- testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) {
- req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil)
- if err != nil {
- t.Fatalf("Failed to create request: %v", err)
- }
- req.Header.Set("Authorization", "Bearer "+strings.ReplaceAll(tokenBasicNotExpired, ".", ".invalid"))
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- t.Fatalf("Failed to get protected endpoint: %v", err)
- }
- t.Cleanup(func() { _ = resp.Body.Close })
- t.Run("Protected resource with INVALID Authorization header returns 401 - Unauthorized", func(t *testing.T) {
- if resp.StatusCode != 401 {
- t.Errorf("Expected HTTP 401, got %d", resp.StatusCode)
- }
- })
- t.Run("Protected resource with INVALID Authorization header returns WWW-Authenticate header", func(t *testing.T) {
- authHeader := resp.Header.Get("WWW-Authenticate")
- expected := `Bearer realm="Kubernetes MCP Server", error="invalid_token"`
- if authHeader != expected {
- t.Errorf("Expected WWW-Authenticate header to be %q, got %q", expected, authHeader)
- }
- })
- t.Run("Protected resource with INVALID Authorization header logs error", func(t *testing.T) {
- if !strings.Contains(ctx.LogBuffer.String(), "Authentication failed - JWT validation error") ||
- !strings.Contains(ctx.LogBuffer.String(), "error: failed to parse JWT token: illegal base64 data") {
- t.Errorf("Expected log entry for JWT validation error, got: %s", ctx.LogBuffer.String())
- }
- })
- })
- // Expired Authorization Bearer token
- testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) {
- req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil)
- if err != nil {
- t.Fatalf("Failed to create request: %v", err)
- }
- req.Header.Set("Authorization", "Bearer "+tokenBasicExpired)
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- t.Fatalf("Failed to get protected endpoint: %v", err)
- }
- t.Cleanup(func() { _ = resp.Body.Close })
- t.Run("Protected resource with EXPIRED Authorization header returns 401 - Unauthorized", func(t *testing.T) {
- if resp.StatusCode != 401 {
- t.Errorf("Expected HTTP 401, got %d", resp.StatusCode)
- }
- })
- t.Run("Protected resource with EXPIRED Authorization header returns WWW-Authenticate header", func(t *testing.T) {
- authHeader := resp.Header.Get("WWW-Authenticate")
- expected := `Bearer realm="Kubernetes MCP Server", error="invalid_token"`
- if authHeader != expected {
- t.Errorf("Expected WWW-Authenticate header to be %q, got %q", expected, authHeader)
- }
- })
- t.Run("Protected resource with EXPIRED Authorization header logs error", func(t *testing.T) {
- if !strings.Contains(ctx.LogBuffer.String(), "Authentication failed - JWT validation error") ||
- !strings.Contains(ctx.LogBuffer.String(), "validation failed, token is expired (exp)") {
- t.Errorf("Expected log entry for JWT validation error, got: %s", ctx.LogBuffer.String())
- }
- })
- })
- // Invalid audience claim Bearer token
- testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "expected-audience", ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) {
- req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil)
- if err != nil {
- t.Fatalf("Failed to create request: %v", err)
- }
- req.Header.Set("Authorization", "Bearer "+tokenBasicExpired)
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- t.Fatalf("Failed to get protected endpoint: %v", err)
- }
- t.Cleanup(func() { _ = resp.Body.Close })
- t.Run("Protected resource with INVALID AUDIENCE Authorization header returns 401 - Unauthorized", func(t *testing.T) {
- if resp.StatusCode != 401 {
- t.Errorf("Expected HTTP 401, got %d", resp.StatusCode)
- }
- })
- t.Run("Protected resource with INVALID AUDIENCE Authorization header returns WWW-Authenticate header", func(t *testing.T) {
- authHeader := resp.Header.Get("WWW-Authenticate")
- expected := `Bearer realm="Kubernetes MCP Server", audience="expected-audience", error="invalid_token"`
- if authHeader != expected {
- t.Errorf("Expected WWW-Authenticate header to be %q, got %q", expected, authHeader)
- }
- })
- t.Run("Protected resource with INVALID AUDIENCE Authorization header logs error", func(t *testing.T) {
- if !strings.Contains(ctx.LogBuffer.String(), "Authentication failed - JWT validation error") ||
- !strings.Contains(ctx.LogBuffer.String(), "invalid audience claim (aud)") {
- t.Errorf("Expected log entry for JWT validation error, got: %s", ctx.LogBuffer.String())
- }
- })
- })
- // Failed OIDC validation
- oidcTestServer := NewOidcTestServer(t)
- t.Cleanup(oidcTestServer.Close)
- testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "mcp-server", ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}, OidcProvider: oidcTestServer.Provider}, func(ctx *httpContext) {
- req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil)
- if err != nil {
- t.Fatalf("Failed to create request: %v", err)
- }
- req.Header.Set("Authorization", "Bearer "+tokenBasicNotExpired)
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- t.Fatalf("Failed to get protected endpoint: %v", err)
- }
- t.Cleanup(func() { _ = resp.Body.Close })
- t.Run("Protected resource with INVALID OIDC Authorization header returns 401 - Unauthorized", func(t *testing.T) {
- if resp.StatusCode != 401 {
- t.Errorf("Expected HTTP 401, got %d", resp.StatusCode)
- }
- })
- t.Run("Protected resource with INVALID OIDC Authorization header returns WWW-Authenticate header", func(t *testing.T) {
- authHeader := resp.Header.Get("WWW-Authenticate")
- expected := `Bearer realm="Kubernetes MCP Server", audience="mcp-server", error="invalid_token"`
- if authHeader != expected {
- t.Errorf("Expected WWW-Authenticate header to be %q, got %q", expected, authHeader)
- }
- })
- t.Run("Protected resource with INVALID OIDC Authorization header logs error", func(t *testing.T) {
- if !strings.Contains(ctx.LogBuffer.String(), "Authentication failed - JWT validation error") ||
- !strings.Contains(ctx.LogBuffer.String(), "OIDC token validation error: failed to verify signature") {
- t.Errorf("Expected log entry for OIDC validation error, got: %s", ctx.LogBuffer.String())
- }
- })
- })
- // Failed Kubernetes TokenReview
- rawClaims := `{
- "iss": "` + oidcTestServer.URL + `",
- "exp": ` + strconv.FormatInt(time.Now().Add(time.Hour).Unix(), 10) + `,
- "aud": "mcp-server"
- }`
- validOidcToken := oidctest.SignIDToken(oidcTestServer.PrivateKey, "test-oidc-key-id", oidc.RS256, rawClaims)
- testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "mcp-server", ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}, OidcProvider: oidcTestServer.Provider}, func(ctx *httpContext) {
- req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil)
- if err != nil {
- t.Fatalf("Failed to create request: %v", err)
- }
- req.Header.Set("Authorization", "Bearer "+validOidcToken)
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- t.Fatalf("Failed to get protected endpoint: %v", err)
- }
- t.Cleanup(func() { _ = resp.Body.Close })
- t.Run("Protected resource with INVALID KUBERNETES Authorization header returns 401 - Unauthorized", func(t *testing.T) {
- if resp.StatusCode != 401 {
- t.Errorf("Expected HTTP 401, got %d", resp.StatusCode)
- }
- })
- t.Run("Protected resource with INVALID KUBERNETES Authorization header returns WWW-Authenticate header", func(t *testing.T) {
- authHeader := resp.Header.Get("WWW-Authenticate")
- expected := `Bearer realm="Kubernetes MCP Server", audience="mcp-server", error="invalid_token"`
- if authHeader != expected {
- t.Errorf("Expected WWW-Authenticate header to be %q, got %q", expected, authHeader)
- }
- })
- t.Run("Protected resource with INVALID KUBERNETES Authorization header logs error", func(t *testing.T) {
- if !strings.Contains(ctx.LogBuffer.String(), "Authentication failed - JWT validation error") ||
- !strings.Contains(ctx.LogBuffer.String(), "kubernetes API token validation error: failed to create token review") {
- t.Errorf("Expected log entry for Kubernetes TokenReview error, got: %s", ctx.LogBuffer.String())
- }
- })
- })
-}
-
-func TestAuthorizationRequireOAuthFalse(t *testing.T) {
- testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: false, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) {
- resp, err := http.Get(fmt.Sprintf("http://%s/mcp", ctx.HttpAddress))
- if err != nil {
- t.Fatalf("Failed to get protected endpoint: %v", err)
- }
- t.Cleanup(func() { _ = resp.Body.Close() })
- t.Run("Protected resource with MISSING Authorization header returns 200 - OK)", func(t *testing.T) {
- if resp.StatusCode != http.StatusOK {
- t.Errorf("Expected HTTP 200 OK, got %d", resp.StatusCode)
- }
- })
- })
-}
-
-func TestAuthorizationRawToken(t *testing.T) {
- cases := []struct {
- audience string
- validateToken bool
- }{
- {"", false}, // No audience, no validation
- {"", true}, // No audience, validation enabled
- {"mcp-server", false}, // Audience set, no validation
- {"mcp-server", true}, // Audience set, validation enabled
- }
- for _, c := range cases {
- testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: c.audience, ValidateToken: c.validateToken, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) {
- tokenReviewed := false
- ctx.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
- if req.URL.EscapedPath() == "/apis/authentication.k8s.io/v1/tokenreviews" {
- w.Header().Set("Content-Type", "application/json")
- _, _ = w.Write([]byte(tokenReviewSuccessful))
- tokenReviewed = true
- return
- }
- }))
- req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil)
- if err != nil {
- t.Fatalf("Failed to create request: %v", err)
- }
- req.Header.Set("Authorization", "Bearer "+tokenBasicNotExpired)
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- t.Fatalf("Failed to get protected endpoint: %v", err)
- }
- t.Cleanup(func() { _ = resp.Body.Close() })
- t.Run(fmt.Sprintf("Protected resource with audience = '%s' and validate-token = '%t', with VALID Authorization header returns 200 - OK", c.audience, c.validateToken), func(t *testing.T) {
- if resp.StatusCode != http.StatusOK {
- t.Errorf("Expected HTTP 200 OK, got %d", resp.StatusCode)
- }
- })
- t.Run(fmt.Sprintf("Protected resource with audience = '%s' and validate-token = '%t', with VALID Authorization header performs token validation accordingly", c.audience, c.validateToken), func(t *testing.T) {
- if tokenReviewed == true && !c.validateToken {
- t.Errorf("Expected token review to be skipped when validate-token is false, but it was performed")
- }
- if tokenReviewed == false && c.validateToken {
- t.Errorf("Expected token review to be performed when validate-token is true, but it was skipped")
- }
- })
- })
- }
-
-}
-
-func TestAuthorizationOidcToken(t *testing.T) {
- oidcTestServer := NewOidcTestServer(t)
- t.Cleanup(oidcTestServer.Close)
- rawClaims := `{
- "iss": "` + oidcTestServer.URL + `",
- "exp": ` + strconv.FormatInt(time.Now().Add(time.Hour).Unix(), 10) + `,
- "aud": "mcp-server"
- }`
- validOidcToken := oidctest.SignIDToken(oidcTestServer.PrivateKey, "test-oidc-key-id", oidc.RS256, rawClaims)
- cases := []bool{false, true}
- for _, validateToken := range cases {
- testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "mcp-server", ValidateToken: validateToken, ClusterProviderStrategy: config.ClusterProviderKubeConfig}, OidcProvider: oidcTestServer.Provider}, func(ctx *httpContext) {
- tokenReviewed := false
- ctx.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
- if req.URL.EscapedPath() == "/apis/authentication.k8s.io/v1/tokenreviews" {
- w.Header().Set("Content-Type", "application/json")
- _, _ = w.Write([]byte(tokenReviewSuccessful))
- tokenReviewed = true
- return
- }
- }))
- req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil)
- if err != nil {
- t.Fatalf("Failed to create request: %v", err)
- }
- req.Header.Set("Authorization", "Bearer "+validOidcToken)
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- t.Fatalf("Failed to get protected endpoint: %v", err)
- }
- t.Cleanup(func() { _ = resp.Body.Close() })
- t.Run(fmt.Sprintf("Protected resource with validate-token='%t' with VALID OIDC Authorization header returns 200 - OK", validateToken), func(t *testing.T) {
- if resp.StatusCode != http.StatusOK {
- t.Errorf("Expected HTTP 200 OK, got %d", resp.StatusCode)
- }
- })
- t.Run(fmt.Sprintf("Protected resource with validate-token='%t' with VALID OIDC Authorization header performs token validation accordingly", validateToken), func(t *testing.T) {
- if tokenReviewed == true && !validateToken {
- t.Errorf("Expected token review to be skipped when validate-token is false, but it was performed")
- }
- if tokenReviewed == false && validateToken {
- t.Errorf("Expected token review to be performed when validate-token is true, but it was skipped")
- }
- })
- })
- }
-}
-
-func TestAuthorizationOidcTokenExchange(t *testing.T) {
- oidcTestServer := NewOidcTestServer(t)
- t.Cleanup(oidcTestServer.Close)
- rawClaims := `{
- "iss": "` + oidcTestServer.URL + `",
- "exp": ` + strconv.FormatInt(time.Now().Add(time.Hour).Unix(), 10) + `,
- "aud": "%s"
- }`
- validOidcClientToken := oidctest.SignIDToken(oidcTestServer.PrivateKey, "test-oidc-key-id", oidc.RS256,
- fmt.Sprintf(rawClaims, "mcp-server"))
- validOidcBackendToken := oidctest.SignIDToken(oidcTestServer.PrivateKey, "test-oidc-key-id", oidc.RS256,
- fmt.Sprintf(rawClaims, "backend-audience"))
- oidcTestServer.TokenEndpointHandler = func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-Type", "application/json")
- _, _ = fmt.Fprintf(w, `{"access_token":"%s","token_type":"Bearer","expires_in":253402297199}`, validOidcBackendToken)
- }
- cases := []bool{false, true}
- for _, validateToken := range cases {
- staticConfig := &config.StaticConfig{
- RequireOAuth: true,
- OAuthAudience: "mcp-server",
- ValidateToken: validateToken,
- StsClientId: "test-sts-client-id",
- StsClientSecret: "test-sts-client-secret",
- StsAudience: "backend-audience",
- StsScopes: []string{"backend-scope"},
- ClusterProviderStrategy: config.ClusterProviderKubeConfig,
- }
- testCaseWithContext(t, &httpContext{StaticConfig: staticConfig, OidcProvider: oidcTestServer.Provider}, func(ctx *httpContext) {
- tokenReviewed := false
- ctx.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
- if req.URL.EscapedPath() == "/apis/authentication.k8s.io/v1/tokenreviews" {
- w.Header().Set("Content-Type", "application/json")
- _, _ = w.Write([]byte(tokenReviewSuccessful))
- tokenReviewed = true
- return
- }
- }))
- req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil)
- if err != nil {
- t.Fatalf("Failed to create request: %v", err)
- }
- req.Header.Set("Authorization", "Bearer "+validOidcClientToken)
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- t.Fatalf("Failed to get protected endpoint: %v", err)
- }
- t.Cleanup(func() { _ = resp.Body.Close() })
- t.Run(fmt.Sprintf("Protected resource with validate-token='%t' with VALID OIDC EXCHANGE Authorization header returns 200 - OK", validateToken), func(t *testing.T) {
- if resp.StatusCode != http.StatusOK {
- t.Errorf("Expected HTTP 200 OK, got %d", resp.StatusCode)
- }
- })
- t.Run(fmt.Sprintf("Protected resource with validate-token='%t' with VALID OIDC EXCHANGE Authorization header performs token validation accordingly", validateToken), func(t *testing.T) {
- if tokenReviewed == true && !validateToken {
- t.Errorf("Expected token review to be skipped when validate-token is false, but it was performed")
- }
- if tokenReviewed == false && validateToken {
- t.Errorf("Expected token review to be performed when validate-token is true, but it was skipped")
- }
- })
- })
- }
-}
diff --git a/pkg/http/wellknown.go b/pkg/http/wellknown.go
index 0d80221e..01ff3092 100644
--- a/pkg/http/wellknown.go
+++ b/pkg/http/wellknown.go
@@ -25,19 +25,24 @@ type WellKnown struct {
authorizationUrl string
scopesSupported []string
disableDynamicClientRegistration bool
+ httpClient *http.Client
}
var _ http.Handler = &WellKnown{}
-func WellKnownHandler(staticConfig *config.StaticConfig) http.Handler {
+func WellKnownHandler(staticConfig *config.StaticConfig, httpClient *http.Client) http.Handler {
authorizationUrl := staticConfig.AuthorizationURL
- if authorizationUrl != "" && strings.HasSuffix("authorizationUrl", "/") {
+ if authorizationUrl != "" && strings.HasSuffix(authorizationUrl, "/") {
authorizationUrl = strings.TrimSuffix(authorizationUrl, "/")
}
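+ // Fall back to the shared default client when the caller does not supply one (e.g. no custom CA is configured)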
+ if httpClient == nil {
+ httpClient = http.DefaultClient
+ }
return &WellKnown{
authorizationUrl: authorizationUrl,
disableDynamicClientRegistration: staticConfig.DisableDynamicClientRegistration,
scopesSupported: staticConfig.OAuthScopes,
+ httpClient: httpClient,
}
}
@@ -51,7 +56,12 @@ func (w WellKnown) ServeHTTP(writer http.ResponseWriter, request *http.Request)
http.Error(writer, "Failed to create request: "+err.Error(), http.StatusInternalServerError)
return
}
- resp, err := http.DefaultClient.Do(req.WithContext(request.Context()))
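+ // Copy the incoming request headers onto the proxied request so credentials and content negotiation are preserved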
+ for key, values := range request.Header {
+ for _, value := range values {
+ req.Header.Add(key, value)
+ }
+ }
+ resp, err := w.httpClient.Do(req.WithContext(request.Context()))
if err != nil {
http.Error(writer, "Failed to perform request: "+err.Error(), http.StatusInternalServerError)
return
diff --git a/pkg/kubernetes-mcp-server/cmd/root.go b/pkg/kubernetes-mcp-server/cmd/root.go
index 1e91d0c4..db1782ab 100644
--- a/pkg/kubernetes-mcp-server/cmd/root.go
+++ b/pkg/kubernetes-mcp-server/cmd/root.go
@@ -301,10 +301,11 @@ func (m *MCPServerOptions) Run() error {
}
var oidcProvider *oidc.Provider
+ var httpClient *http.Client
if m.StaticConfig.AuthorizationURL != "" {
ctx := context.Background()
if m.StaticConfig.CertificateAuthority != "" {
- httpClient := &http.Client{}
+ httpClient = &http.Client{}
caCert, err := os.ReadFile(m.StaticConfig.CertificateAuthority)
if err != nil {
return fmt.Errorf("failed to read CA certificate from %s: %w", m.StaticConfig.CertificateAuthority, err)
@@ -341,7 +342,7 @@ func (m *MCPServerOptions) Run() error {
if m.StaticConfig.Port != "" {
ctx := context.Background()
- return internalhttp.Serve(ctx, mcpServer, m.StaticConfig, oidcProvider)
+ return internalhttp.Serve(ctx, mcpServer, m.StaticConfig, oidcProvider, httpClient)
}
if err := mcpServer.ServeStdio(); err != nil && !errors.Is(err, context.Canceled) {
diff --git a/pkg/kubernetes/accesscontrol_clientset.go b/pkg/kubernetes/accesscontrol_clientset.go
index ed875c64..a6c3fccd 100644
--- a/pkg/kubernetes/accesscontrol_clientset.go
+++ b/pkg/kubernetes/accesscontrol_clientset.go
@@ -39,6 +39,69 @@ func (a *AccessControlClientset) DiscoveryClient() discovery.DiscoveryInterface
return a.discoveryClient
}
+func (a *AccessControlClientset) Nodes() (corev1.NodeInterface, error) {
+ gvk := &schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"}
+ if !isAllowed(a.staticConfig, gvk) {
+ return nil, isNotAllowedError(gvk)
+ }
+ return a.delegate.CoreV1().Nodes(), nil
+}
+
+func (a *AccessControlClientset) NodesLogs(ctx context.Context, name string) (*rest.Request, error) {
+ gvk := &schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"}
+ if !isAllowed(a.staticConfig, gvk) {
+ return nil, isNotAllowedError(gvk)
+ }
+
+ if _, err := a.delegate.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{}); err != nil {
+ return nil, fmt.Errorf("failed to get node %s: %w", name, err)
+ }
+
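+ // Build the API server node-proxy path to the kubelet logs endpoint: /api/v1/nodes/{name}/proxy/logs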
+ url := []string{"api", "v1", "nodes", name, "proxy", "logs"}
+ return a.delegate.CoreV1().RESTClient().
+ Get().
+ AbsPath(url...), nil
+}
+
+func (a *AccessControlClientset) NodesMetricses(ctx context.Context, name string, listOptions metav1.ListOptions) (*metrics.NodeMetricsList, error) {
+ gvk := &schema.GroupVersionKind{Group: metrics.GroupName, Version: metricsv1beta1api.SchemeGroupVersion.Version, Kind: "NodeMetrics"}
+ if !isAllowed(a.staticConfig, gvk) {
+ return nil, isNotAllowedError(gvk)
+ }
+ versionedMetrics := &metricsv1beta1api.NodeMetricsList{}
+ var err error
+ if name != "" {
+ m, err := a.metricsV1beta1.NodeMetricses().Get(ctx, name, metav1.GetOptions{})
+ if err != nil {
+ return nil, fmt.Errorf("failed to get metrics for node %s: %w", name, err)
+ }
+ versionedMetrics.Items = []metricsv1beta1api.NodeMetrics{*m}
+ } else {
+ versionedMetrics, err = a.metricsV1beta1.NodeMetricses().List(ctx, listOptions)
+ if err != nil {
+ return nil, fmt.Errorf("failed to list node metrics: %w", err)
+ }
+ }
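+ // Convert the versioned v1beta1 metrics to the internal metrics API types returned to callers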
+ convertedMetrics := &metrics.NodeMetricsList{}
+ return convertedMetrics, metricsv1beta1api.Convert_v1beta1_NodeMetricsList_To_metrics_NodeMetricsList(versionedMetrics, convertedMetrics, nil)
+}
+
+func (a *AccessControlClientset) NodesStatsSummary(ctx context.Context, name string) (*rest.Request, error) {
+ gvk := &schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"}
+ if !isAllowed(a.staticConfig, gvk) {
+ return nil, isNotAllowedError(gvk)
+ }
+
+ if _, err := a.delegate.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{}); err != nil {
+ return nil, fmt.Errorf("failed to get node %s: %w", name, err)
+ }
+
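+ // Build the API server node-proxy path to the kubelet stats summary endpoint: /api/v1/nodes/{name}/proxy/stats/summary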
+ url := []string{"api", "v1", "nodes", name, "proxy", "stats", "summary"}
+ return a.delegate.CoreV1().RESTClient().
+ Get().
+ AbsPath(url...), nil
+}
+
func (a *AccessControlClientset) Pods(namespace string) (corev1.PodInterface, error) {
gvk := &schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}
if !isAllowed(a.staticConfig, gvk) {
diff --git a/pkg/kubernetes/configuration.go b/pkg/kubernetes/configuration.go
index ff521a2a..71fd2dd2 100644
--- a/pkg/kubernetes/configuration.go
+++ b/pkg/kubernetes/configuration.go
@@ -1,9 +1,9 @@
package kubernetes
import (
+ "github.com/containers/kubernetes-mcp-server/pkg/config"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/rest"
- "k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/tools/clientcmd/api/latest"
)
@@ -22,73 +22,22 @@ var InClusterConfig = func() (*rest.Config, error) {
return inClusterConfig, err
}
-// resolveKubernetesConfigurations resolves the required kubernetes configurations and sets them in the Kubernetes struct
-func resolveKubernetesConfigurations(kubernetes *Manager) error {
- // Always set clientCmdConfig
- pathOptions := clientcmd.NewDefaultPathOptions()
- if kubernetes.staticConfig.KubeConfig != "" {
- pathOptions.LoadingRules.ExplicitPath = kubernetes.staticConfig.KubeConfig
- }
- kubernetes.clientCmdConfig = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
- pathOptions.LoadingRules,
- &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: ""}})
- var err error
- if kubernetes.IsInCluster() {
- kubernetes.cfg, err = InClusterConfig()
- if err == nil && kubernetes.cfg != nil {
- return nil
- }
- }
- // Out of cluster
- kubernetes.cfg, err = kubernetes.clientCmdConfig.ClientConfig()
- if kubernetes.cfg != nil && kubernetes.cfg.UserAgent == "" {
- kubernetes.cfg.UserAgent = rest.DefaultKubernetesUserAgent()
- }
- return err
-}
-
-func (m *Manager) IsInCluster() bool {
- if m.staticConfig.KubeConfig != "" {
+func IsInCluster(cfg *config.StaticConfig) bool {
+ // Even when running in-cluster, an explicitly provided kubeconfig is treated as an out-of-cluster deployment
+ if cfg != nil && cfg.KubeConfig != "" {
return false
}
- cfg, err := InClusterConfig()
- return err == nil && cfg != nil
-}
-
-func (m *Manager) configuredNamespace() string {
- if ns, _, nsErr := m.clientCmdConfig.Namespace(); nsErr == nil {
- return ns
- }
- return ""
-}
-
-func (m *Manager) NamespaceOrDefault(namespace string) string {
- if namespace == "" {
- return m.configuredNamespace()
- }
- return namespace
+ restConfig, err := InClusterConfig()
+ return err == nil && restConfig != nil
}
func (k *Kubernetes) NamespaceOrDefault(namespace string) string {
return k.manager.NamespaceOrDefault(namespace)
}
-// ToRESTConfig returns the rest.Config object (genericclioptions.RESTClientGetter)
-func (m *Manager) ToRESTConfig() (*rest.Config, error) {
- return m.cfg, nil
-}
-
-// ToRawKubeConfigLoader returns the clientcmd.ClientConfig object (genericclioptions.RESTClientGetter)
-func (m *Manager) ToRawKubeConfigLoader() clientcmd.ClientConfig {
- return m.clientCmdConfig
-}
-
// ConfigurationContextsDefault returns the current context name
// TODO: Should be moved to the Provider level ?
func (k *Kubernetes) ConfigurationContextsDefault() (string, error) {
- if k.manager.IsInCluster() {
- return inClusterKubeConfigDefaultContext, nil
- }
cfg, err := k.manager.clientCmdConfig.RawConfig()
if err != nil {
return "", err
@@ -99,9 +48,6 @@ func (k *Kubernetes) ConfigurationContextsDefault() (string, error) {
// ConfigurationContextsList returns the list of available context names
// TODO: Should be moved to the Provider level ?
func (k *Kubernetes) ConfigurationContextsList() (map[string]string, error) {
- if k.manager.IsInCluster() {
- return map[string]string{inClusterKubeConfigDefaultContext: ""}, nil
- }
cfg, err := k.manager.clientCmdConfig.RawConfig()
if err != nil {
return nil, err
@@ -125,21 +71,7 @@ func (k *Kubernetes) ConfigurationContextsList() (map[string]string, error) {
func (k *Kubernetes) ConfigurationView(minify bool) (runtime.Object, error) {
var cfg clientcmdapi.Config
var err error
- if k.manager.IsInCluster() {
- cfg = *clientcmdapi.NewConfig()
- cfg.Clusters["cluster"] = &clientcmdapi.Cluster{
- Server: k.manager.cfg.Host,
- InsecureSkipTLSVerify: k.manager.cfg.Insecure,
- }
- cfg.AuthInfos["user"] = &clientcmdapi.AuthInfo{
- Token: k.manager.cfg.BearerToken,
- }
- cfg.Contexts[inClusterKubeConfigDefaultContext] = &clientcmdapi.Context{
- Cluster: "cluster",
- AuthInfo: "user",
- }
- cfg.CurrentContext = inClusterKubeConfigDefaultContext
- } else if cfg, err = k.manager.clientCmdConfig.RawConfig(); err != nil {
+ if cfg, err = k.manager.clientCmdConfig.RawConfig(); err != nil {
return nil, err
}
if minify {
diff --git a/pkg/kubernetes/configuration_test.go b/pkg/kubernetes/configuration_test.go
deleted file mode 100644
index 084b99d7..00000000
--- a/pkg/kubernetes/configuration_test.go
+++ /dev/null
@@ -1,155 +0,0 @@
-package kubernetes
-
-import (
- "errors"
- "os"
- "path"
- "runtime"
- "strings"
- "testing"
-
- "k8s.io/client-go/rest"
-
- "github.com/containers/kubernetes-mcp-server/pkg/config"
-)
-
-func TestKubernetes_IsInCluster(t *testing.T) {
- t.Run("with explicit kubeconfig", func(t *testing.T) {
- m := Manager{
- staticConfig: &config.StaticConfig{
- KubeConfig: "kubeconfig",
- },
- }
- if m.IsInCluster() {
- t.Errorf("expected not in cluster, got in cluster")
- }
- })
- t.Run("with empty kubeconfig and in cluster", func(t *testing.T) {
- originalFunction := InClusterConfig
- InClusterConfig = func() (*rest.Config, error) {
- return &rest.Config{}, nil
- }
- defer func() {
- InClusterConfig = originalFunction
- }()
- m := Manager{
- staticConfig: &config.StaticConfig{
- KubeConfig: "",
- },
- }
- if !m.IsInCluster() {
- t.Errorf("expected in cluster, got not in cluster")
- }
- })
- t.Run("with empty kubeconfig and not in cluster (empty)", func(t *testing.T) {
- originalFunction := InClusterConfig
- InClusterConfig = func() (*rest.Config, error) {
- return nil, nil
- }
- defer func() {
- InClusterConfig = originalFunction
- }()
- m := Manager{
- staticConfig: &config.StaticConfig{
- KubeConfig: "",
- },
- }
- if m.IsInCluster() {
- t.Errorf("expected not in cluster, got in cluster")
- }
- })
- t.Run("with empty kubeconfig and not in cluster (error)", func(t *testing.T) {
- originalFunction := InClusterConfig
- InClusterConfig = func() (*rest.Config, error) {
- return nil, errors.New("error")
- }
- defer func() {
- InClusterConfig = originalFunction
- }()
- m := Manager{
- staticConfig: &config.StaticConfig{
- KubeConfig: "",
- },
- }
- if m.IsInCluster() {
- t.Errorf("expected not in cluster, got in cluster")
- }
- })
-}
-
-func TestKubernetes_ResolveKubernetesConfigurations_Explicit(t *testing.T) {
- t.Run("with missing file", func(t *testing.T) {
- if runtime.GOOS != "linux" && runtime.GOOS != "darwin" {
- t.Skip("Skipping test on non-linux platforms")
- }
- tempDir := t.TempDir()
- m := Manager{staticConfig: &config.StaticConfig{
- KubeConfig: path.Join(tempDir, "config"),
- }}
- err := resolveKubernetesConfigurations(&m)
- if err == nil {
- t.Errorf("expected error, got nil")
- }
- if !errors.Is(err, os.ErrNotExist) {
- t.Errorf("expected file not found error, got %v", err)
- }
- if !strings.HasSuffix(err.Error(), ": no such file or directory") {
- t.Errorf("expected file not found error, got %v", err)
- }
- })
- t.Run("with empty file", func(t *testing.T) {
- tempDir := t.TempDir()
- kubeconfigPath := path.Join(tempDir, "config")
- if err := os.WriteFile(kubeconfigPath, []byte(""), 0644); err != nil {
- t.Fatalf("failed to create kubeconfig file: %v", err)
- }
- m := Manager{staticConfig: &config.StaticConfig{
- KubeConfig: kubeconfigPath,
- }}
- err := resolveKubernetesConfigurations(&m)
- if err == nil {
- t.Errorf("expected error, got nil")
- }
- if !strings.Contains(err.Error(), "no configuration has been provided") {
- t.Errorf("expected no kubeconfig error, got %v", err)
- }
- })
- t.Run("with valid file", func(t *testing.T) {
- tempDir := t.TempDir()
- kubeconfigPath := path.Join(tempDir, "config")
- kubeconfigContent := `
-apiVersion: v1
-kind: Config
-clusters:
-- cluster:
- server: https://example.com
- name: example-cluster
-contexts:
-- context:
- cluster: example-cluster
- user: example-user
- name: example-context
-current-context: example-context
-users:
-- name: example-user
- user:
- token: example-token
-`
- if err := os.WriteFile(kubeconfigPath, []byte(kubeconfigContent), 0644); err != nil {
- t.Fatalf("failed to create kubeconfig file: %v", err)
- }
- m := Manager{staticConfig: &config.StaticConfig{
- KubeConfig: kubeconfigPath,
- }}
- err := resolveKubernetesConfigurations(&m)
- if err != nil {
- t.Fatalf("expected no error, got %v", err)
- }
- if m.cfg == nil {
- t.Errorf("expected non-nil config, got nil")
- }
- if m.cfg.Host != "https://example.com" {
- t.Errorf("expected host https://example.com, got %s", m.cfg.Host)
- }
- })
-}
diff --git a/pkg/kubernetes/kubernetes.go b/pkg/kubernetes/kubernetes.go
index 6cb770eb..3b5733e1 100644
--- a/pkg/kubernetes/kubernetes.go
+++ b/pkg/kubernetes/kubernetes.go
@@ -1,27 +1,10 @@
package kubernetes
import (
- "context"
- "errors"
- "strings"
-
"k8s.io/apimachinery/pkg/runtime"
- "github.com/fsnotify/fsnotify"
-
- "k8s.io/apimachinery/pkg/api/meta"
- "k8s.io/client-go/discovery"
- "k8s.io/client-go/discovery/cached/memory"
- "k8s.io/client-go/dynamic"
- "k8s.io/client-go/kubernetes/scheme"
- "k8s.io/client-go/rest"
- "k8s.io/client-go/restmapper"
- "k8s.io/client-go/tools/clientcmd"
- clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
- "k8s.io/klog/v2"
-
- "github.com/containers/kubernetes-mcp-server/pkg/config"
"github.com/containers/kubernetes-mcp-server/pkg/helm"
+ "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
)
@@ -47,174 +30,9 @@ func (k *Kubernetes) AccessControlClientset() *AccessControlClientset {
return k.manager.accessControlClientSet
}
-type Manager struct {
- cfg *rest.Config
- clientCmdConfig clientcmd.ClientConfig
- discoveryClient discovery.CachedDiscoveryInterface
- accessControlClientSet *AccessControlClientset
- accessControlRESTMapper *AccessControlRESTMapper
- dynamicClient *dynamic.DynamicClient
-
- staticConfig *config.StaticConfig
- CloseWatchKubeConfig CloseWatchKubeConfig
-}
-
-var _ helm.Kubernetes = (*Manager)(nil)
-var _ Openshift = (*Manager)(nil)
-
var Scheme = scheme.Scheme
var ParameterCodec = runtime.NewParameterCodec(Scheme)
-func NewManager(config *config.StaticConfig) (*Manager, error) {
- k8s := &Manager{
- staticConfig: config,
- }
- if err := resolveKubernetesConfigurations(k8s); err != nil {
- return nil, err
- }
- // TODO: Won't work because not all client-go clients use the shared context (e.g. discovery client uses context.TODO())
- //k8s.cfg.Wrap(func(original http.RoundTripper) http.RoundTripper {
- // return &impersonateRoundTripper{original}
- //})
- var err error
- k8s.accessControlClientSet, err = NewAccessControlClientset(k8s.cfg, k8s.staticConfig)
- if err != nil {
- return nil, err
- }
- k8s.discoveryClient = memory.NewMemCacheClient(k8s.accessControlClientSet.DiscoveryClient())
- k8s.accessControlRESTMapper = NewAccessControlRESTMapper(
- restmapper.NewDeferredDiscoveryRESTMapper(k8s.discoveryClient),
- k8s.staticConfig,
- )
- k8s.dynamicClient, err = dynamic.NewForConfig(k8s.cfg)
- if err != nil {
- return nil, err
- }
- return k8s, nil
-}
-
-func (m *Manager) WatchKubeConfig(onKubeConfigChange func() error) {
- if m.clientCmdConfig == nil {
- return
- }
- kubeConfigFiles := m.clientCmdConfig.ConfigAccess().GetLoadingPrecedence()
- if len(kubeConfigFiles) == 0 {
- return
- }
- watcher, err := fsnotify.NewWatcher()
- if err != nil {
- return
- }
- for _, file := range kubeConfigFiles {
- _ = watcher.Add(file)
- }
- go func() {
- for {
- select {
- case _, ok := <-watcher.Events:
- if !ok {
- return
- }
- _ = onKubeConfigChange()
- case _, ok := <-watcher.Errors:
- if !ok {
- return
- }
- }
- }
- }()
- if m.CloseWatchKubeConfig != nil {
- _ = m.CloseWatchKubeConfig()
- }
- m.CloseWatchKubeConfig = watcher.Close
-}
-
-func (m *Manager) Close() {
- if m.CloseWatchKubeConfig != nil {
- _ = m.CloseWatchKubeConfig()
- }
-}
-
-func (m *Manager) GetAPIServerHost() string {
- if m.cfg == nil {
- return ""
- }
- return m.cfg.Host
-}
-
-func (m *Manager) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
- return m.discoveryClient, nil
-}
-
-func (m *Manager) ToRESTMapper() (meta.RESTMapper, error) {
- return m.accessControlRESTMapper, nil
-}
-
-func (m *Manager) Derived(ctx context.Context) (*Kubernetes, error) {
- authorization, ok := ctx.Value(OAuthAuthorizationHeader).(string)
- if !ok || !strings.HasPrefix(authorization, "Bearer ") {
- if m.staticConfig.RequireOAuth {
- return nil, errors.New("oauth token required")
- }
- return &Kubernetes{manager: m}, nil
- }
- klog.V(5).Infof("%s header found (Bearer), using provided bearer token", OAuthAuthorizationHeader)
- derivedCfg := &rest.Config{
- Host: m.cfg.Host,
- APIPath: m.cfg.APIPath,
- // Copy only server verification TLS settings (CA bundle and server name)
- TLSClientConfig: rest.TLSClientConfig{
- Insecure: m.cfg.Insecure,
- ServerName: m.cfg.ServerName,
- CAFile: m.cfg.CAFile,
- CAData: m.cfg.CAData,
- },
- BearerToken: strings.TrimPrefix(authorization, "Bearer "),
- // pass custom UserAgent to identify the client
- UserAgent: CustomUserAgent,
- QPS: m.cfg.QPS,
- Burst: m.cfg.Burst,
- Timeout: m.cfg.Timeout,
- Impersonate: rest.ImpersonationConfig{},
- }
- clientCmdApiConfig, err := m.clientCmdConfig.RawConfig()
- if err != nil {
- if m.staticConfig.RequireOAuth {
- klog.Errorf("failed to get kubeconfig: %v", err)
- return nil, errors.New("failed to get kubeconfig")
- }
- return &Kubernetes{manager: m}, nil
- }
- clientCmdApiConfig.AuthInfos = make(map[string]*clientcmdapi.AuthInfo)
- derived := &Kubernetes{manager: &Manager{
- clientCmdConfig: clientcmd.NewDefaultClientConfig(clientCmdApiConfig, nil),
- cfg: derivedCfg,
- staticConfig: m.staticConfig,
- }}
- derived.manager.accessControlClientSet, err = NewAccessControlClientset(derived.manager.cfg, derived.manager.staticConfig)
- if err != nil {
- if m.staticConfig.RequireOAuth {
- klog.Errorf("failed to get kubeconfig: %v", err)
- return nil, errors.New("failed to get kubeconfig")
- }
- return &Kubernetes{manager: m}, nil
- }
- derived.manager.discoveryClient = memory.NewMemCacheClient(derived.manager.accessControlClientSet.DiscoveryClient())
- derived.manager.accessControlRESTMapper = NewAccessControlRESTMapper(
- restmapper.NewDeferredDiscoveryRESTMapper(derived.manager.discoveryClient),
- derived.manager.staticConfig,
- )
- derived.manager.dynamicClient, err = dynamic.NewForConfig(derived.manager.cfg)
- if err != nil {
- if m.staticConfig.RequireOAuth {
- klog.Errorf("failed to initialize dynamic client: %v", err)
- return nil, errors.New("failed to initialize dynamic client")
- }
- return &Kubernetes{manager: m}, nil
- }
- return derived, nil
-}
-
func (k *Kubernetes) NewHelm() *helm.Helm {
// This is a derived Kubernetes, so it already has the Helm initialized
return helm.NewHelm(k.manager)
diff --git a/pkg/kubernetes/kubernetes_derived_test.go b/pkg/kubernetes/kubernetes_derived_test.go
new file mode 100644
index 00000000..69d4ef33
--- /dev/null
+++ b/pkg/kubernetes/kubernetes_derived_test.go
@@ -0,0 +1,185 @@
+package kubernetes
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/containers/kubernetes-mcp-server/internal/test"
+ "github.com/containers/kubernetes-mcp-server/pkg/config"
+ "github.com/stretchr/testify/suite"
+)
+
+type DerivedTestSuite struct {
+ suite.Suite
+}
+
+func (s *DerivedTestSuite) TestKubeConfig() {
+ // Create a temporary kubeconfig file for testing
+ tempDir := s.T().TempDir()
+ kubeconfigPath := filepath.Join(tempDir, "config")
+ kubeconfigContent := `
+apiVersion: v1
+kind: Config
+clusters:
+- cluster:
+ server: https://test-cluster.example.com
+ name: test-cluster
+contexts:
+- context:
+ cluster: test-cluster
+ user: test-user
+ name: test-context
+current-context: test-context
+users:
+- name: test-user
+ user:
+ username: test-username
+ password: test-password
+`
+ err := os.WriteFile(kubeconfigPath, []byte(kubeconfigContent), 0644)
+ s.Require().NoError(err, "failed to create kubeconfig file")
+
+ s.Run("with no RequireOAuth (default) config", func() {
+ testStaticConfig := test.Must(config.ReadToml([]byte(`
+ kubeconfig = "` + strings.ReplaceAll(kubeconfigPath, `\`, `\\`) + `"
+ `)))
+ s.Run("without authorization header returns original manager", func() {
+ testManager, err := NewKubeconfigManager(testStaticConfig, "")
+ s.Require().NoErrorf(err, "failed to create test manager: %v", err)
+ s.T().Cleanup(testManager.Close)
+
+ derived, err := testManager.Derived(s.T().Context())
+ s.Require().NoErrorf(err, "failed to create derived manager: %v", err)
+
+ s.Equal(derived.manager, testManager, "expected original manager, got different manager")
+ })
+
+ s.Run("with invalid authorization header returns original manager", func() {
+ testManager, err := NewKubeconfigManager(testStaticConfig, "")
+ s.Require().NoErrorf(err, "failed to create test manager: %v", err)
+ s.T().Cleanup(testManager.Close)
+
+ ctx := context.WithValue(s.T().Context(), HeaderKey("Authorization"), "invalid-token")
+ derived, err := testManager.Derived(ctx)
+ s.Require().NoErrorf(err, "failed to create derived manager: %v", err)
+
+ s.Equal(derived.manager, testManager, "expected original manager, got different manager")
+ })
+
+ s.Run("with valid bearer token creates derived manager with correct configuration", func() {
+ testManager, err := NewKubeconfigManager(testStaticConfig, "")
+ s.Require().NoErrorf(err, "failed to create test manager: %v", err)
+ s.T().Cleanup(testManager.Close)
+
+ ctx := context.WithValue(s.T().Context(), HeaderKey("Authorization"), "Bearer aiTana-julIA")
+ derived, err := testManager.Derived(ctx)
+ s.Require().NoErrorf(err, "failed to create derived manager: %v", err)
+
+ s.NotEqual(derived.manager, testManager, "expected new derived manager, got original manager")
+ s.Equal(derived.manager.staticConfig, testStaticConfig, "staticConfig not properly wired to derived manager")
+
+ s.Run("RestConfig is correctly copied and sensitive fields are omitted", func() {
+ derivedCfg := derived.manager.cfg
+ s.Require().NotNil(derivedCfg, "derived config is nil")
+
+ originalCfg := testManager.cfg
+ s.Equalf(originalCfg.Host, derivedCfg.Host, "expected Host %s, got %s", originalCfg.Host, derivedCfg.Host)
+ s.Equalf(originalCfg.APIPath, derivedCfg.APIPath, "expected APIPath %s, got %s", originalCfg.APIPath, derivedCfg.APIPath)
+ s.Equalf(originalCfg.QPS, derivedCfg.QPS, "expected QPS %f, got %f", originalCfg.QPS, derivedCfg.QPS)
+ s.Equalf(originalCfg.Burst, derivedCfg.Burst, "expected Burst %d, got %d", originalCfg.Burst, derivedCfg.Burst)
+ s.Equalf(originalCfg.Timeout, derivedCfg.Timeout, "expected Timeout %v, got %v", originalCfg.Timeout, derivedCfg.Timeout)
+
+ s.Equalf(originalCfg.Insecure, derivedCfg.Insecure, "expected TLS Insecure %v, got %v", originalCfg.Insecure, derivedCfg.Insecure)
+ s.Equalf(originalCfg.ServerName, derivedCfg.ServerName, "expected TLS ServerName %s, got %s", originalCfg.ServerName, derivedCfg.ServerName)
+ s.Equalf(originalCfg.CAFile, derivedCfg.CAFile, "expected TLS CAFile %s, got %s", originalCfg.CAFile, derivedCfg.CAFile)
+ s.Equalf(string(originalCfg.CAData), string(derivedCfg.CAData), "expected TLS CAData %s, got %s", string(originalCfg.CAData), string(derivedCfg.CAData))
+
+ s.Equalf("aiTana-julIA", derivedCfg.BearerToken, "expected BearerToken %s, got %s", "aiTana-julIA", derivedCfg.BearerToken)
+ s.Equalf("kubernetes-mcp-server/bearer-token-auth", derivedCfg.UserAgent, "expected UserAgent \"kubernetes-mcp-server/bearer-token-auth\", got %s", derivedCfg.UserAgent)
+
+ // Verify that sensitive fields are NOT copied to prevent credential leakage
+ // The derived config should only use the bearer token from the Authorization header
+ // and not inherit any authentication credentials from the original kubeconfig
+ s.Emptyf(derivedCfg.CertFile, "expected TLS CertFile to be empty, got %s", derivedCfg.CertFile)
+ s.Emptyf(derivedCfg.KeyFile, "expected TLS KeyFile to be empty, got %s", derivedCfg.KeyFile)
+ s.Emptyf(len(derivedCfg.CertData), "expected TLS CertData to be empty, got %v", derivedCfg.CertData)
+ s.Emptyf(len(derivedCfg.KeyData), "expected TLS KeyData to be empty, got %v", derivedCfg.KeyData)
+
+ s.Emptyf(derivedCfg.Username, "expected Username to be empty, got %s", derivedCfg.Username)
+ s.Emptyf(derivedCfg.Password, "expected Password to be empty, got %s", derivedCfg.Password)
+ s.Nilf(derivedCfg.AuthProvider, "expected AuthProvider to be nil, got %v", derivedCfg.AuthProvider)
+ s.Nilf(derivedCfg.ExecProvider, "expected ExecProvider to be nil, got %v", derivedCfg.ExecProvider)
+ s.Emptyf(derivedCfg.BearerTokenFile, "expected BearerTokenFile to be empty, got %s", derivedCfg.BearerTokenFile)
+ s.Emptyf(derivedCfg.Impersonate.UserName, "expected Impersonate.UserName to be empty, got %s", derivedCfg.Impersonate.UserName)
+
+ // Verify that the original manager still has the sensitive data
+ s.Falsef(originalCfg.Username == "" && originalCfg.Password == "", "original kubeconfig shouldn't be modified")
+
+ })
+ s.Run("derived manager has initialized clients", func() {
+ // Verify that the derived manager has proper clients initialized
+ s.NotNilf(derived.manager.accessControlClientSet, "expected accessControlClientSet to be initialized")
+ s.Equalf(testStaticConfig, derived.manager.accessControlClientSet.staticConfig, "staticConfig not properly wired to derived manager")
+ s.NotNilf(derived.manager.discoveryClient, "expected discoveryClient to be initialized")
+ s.NotNilf(derived.manager.accessControlRESTMapper, "expected accessControlRESTMapper to be initialized")
+ s.Equalf(testStaticConfig, derived.manager.accessControlRESTMapper.staticConfig, "staticConfig not properly wired to derived manager")
+ s.NotNilf(derived.manager.dynamicClient, "expected dynamicClient to be initialized")
+ })
+ })
+ })
+
+ s.Run("with RequireOAuth=true", func() {
+ testStaticConfig := test.Must(config.ReadToml([]byte(`
+ kubeconfig = "` + strings.ReplaceAll(kubeconfigPath, `\`, `\\`) + `"
+ require_oauth = true
+ `)))
+
+ s.Run("with no authorization header returns oauth token required error", func() {
+ testManager, err := NewKubeconfigManager(testStaticConfig, "")
+ s.Require().NoErrorf(err, "failed to create test manager: %v", err)
+ s.T().Cleanup(testManager.Close)
+
+ derived, err := testManager.Derived(s.T().Context())
+ s.Require().Error(err, "expected error for missing oauth token, got nil")
+ s.EqualError(err, "oauth token required", "expected error 'oauth token required', got %s", err.Error())
+ s.Nil(derived, "expected nil derived manager when oauth token required")
+ })
+
+ s.Run("with invalid authorization header returns oauth token required error", func() {
+ testManager, err := NewKubeconfigManager(testStaticConfig, "")
+ s.Require().NoErrorf(err, "failed to create test manager: %v", err)
+ s.T().Cleanup(testManager.Close)
+
+ ctx := context.WithValue(s.T().Context(), HeaderKey("Authorization"), "invalid-token")
+ derived, err := testManager.Derived(ctx)
+ s.Require().Error(err, "expected error for invalid oauth token, got nil")
+ s.EqualError(err, "oauth token required", "expected error 'oauth token required', got %s", err.Error())
+ s.Nil(derived, "expected nil derived manager when oauth token required")
+ })
+
+ s.Run("with valid bearer token creates derived manager", func() {
+ testManager, err := NewKubeconfigManager(testStaticConfig, "")
+ s.Require().NoErrorf(err, "failed to create test manager: %v", err)
+ s.T().Cleanup(testManager.Close)
+
+ ctx := context.WithValue(s.T().Context(), HeaderKey("Authorization"), "Bearer aiTana-julIA")
+ derived, err := testManager.Derived(ctx)
+ s.Require().NoErrorf(err, "failed to create derived manager: %v", err)
+
+ s.NotEqual(derived.manager, testManager, "expected new derived manager, got original manager")
+ s.Equal(derived.manager.staticConfig, testStaticConfig, "staticConfig not properly wired to derived manager")
+
+ derivedCfg := derived.manager.cfg
+ s.Require().NotNil(derivedCfg, "derived config is nil")
+
+ s.Equalf("aiTana-julIA", derivedCfg.BearerToken, "expected BearerToken %s, got %s", "aiTana-julIA", derivedCfg.BearerToken)
+ })
+ })
+}
+
+func TestDerived(t *testing.T) {
+ suite.Run(t, new(DerivedTestSuite))
+}
diff --git a/pkg/kubernetes/kubernetes_test.go b/pkg/kubernetes/kubernetes_test.go
deleted file mode 100644
index 2051ed48..00000000
--- a/pkg/kubernetes/kubernetes_test.go
+++ /dev/null
@@ -1,316 +0,0 @@
-package kubernetes
-
-import (
- "context"
- "os"
- "path"
- "testing"
-
- "github.com/containers/kubernetes-mcp-server/pkg/config"
-)
-
-func TestManager_Derived(t *testing.T) {
- // Create a temporary kubeconfig file for testing
- tempDir := t.TempDir()
- kubeconfigPath := path.Join(tempDir, "config")
- kubeconfigContent := `
-apiVersion: v1
-kind: Config
-clusters:
-- cluster:
- server: https://test-cluster.example.com
- name: test-cluster
-contexts:
-- context:
- cluster: test-cluster
- user: test-user
- name: test-context
-current-context: test-context
-users:
-- name: test-user
- user:
- username: test-username
- password: test-password
-`
- if err := os.WriteFile(kubeconfigPath, []byte(kubeconfigContent), 0644); err != nil {
- t.Fatalf("failed to create kubeconfig file: %v", err)
- }
-
- t.Run("without authorization header returns original manager", func(t *testing.T) {
- testStaticConfig := &config.StaticConfig{
- KubeConfig: kubeconfigPath,
- DisabledTools: []string{"configuration_view"},
- DeniedResources: []config.GroupVersionKind{
- {Group: "apps", Version: "v1", Kind: "Deployment"},
- },
- }
-
- testManager, err := NewManager(testStaticConfig)
- if err != nil {
- t.Fatalf("failed to create manager: %v", err)
- }
- defer testManager.Close()
- ctx := context.Background()
- derived, err := testManager.Derived(ctx)
- if err != nil {
- t.Fatalf("failed to create manager: %v", err)
- }
-
- if derived.manager != testManager {
- t.Errorf("expected original manager, got different manager")
- }
- })
-
- t.Run("with invalid authorization header returns original manager", func(t *testing.T) {
- testStaticConfig := &config.StaticConfig{
- KubeConfig: kubeconfigPath,
- DisabledTools: []string{"configuration_view"},
- DeniedResources: []config.GroupVersionKind{
- {Group: "apps", Version: "v1", Kind: "Deployment"},
- },
- }
-
- testManager, err := NewManager(testStaticConfig)
- if err != nil {
- t.Fatalf("failed to create manager: %v", err)
- }
- defer testManager.Close()
- ctx := context.WithValue(context.Background(), OAuthAuthorizationHeader, "invalid-token")
- derived, err := testManager.Derived(ctx)
- if err != nil {
- t.Fatalf("failed to create manager: %v", err)
- }
-
- if derived.manager != testManager {
- t.Errorf("expected original manager, got different manager")
- }
- })
-
- t.Run("with valid bearer token creates derived manager with correct configuration", func(t *testing.T) {
- testStaticConfig := &config.StaticConfig{
- KubeConfig: kubeconfigPath,
- DisabledTools: []string{"configuration_view"},
- DeniedResources: []config.GroupVersionKind{
- {Group: "apps", Version: "v1", Kind: "Deployment"},
- },
- }
-
- testManager, err := NewManager(testStaticConfig)
- if err != nil {
- t.Fatalf("failed to create manager: %v", err)
- }
- defer testManager.Close()
- testBearerToken := "test-bearer-token-123"
- ctx := context.WithValue(context.Background(), OAuthAuthorizationHeader, "Bearer "+testBearerToken)
- derived, err := testManager.Derived(ctx)
- if err != nil {
- t.Fatalf("failed to create manager: %v", err)
- }
-
- if derived.manager == testManager {
- t.Errorf("expected new derived manager, got original manager")
- }
-
- if derived.manager.staticConfig != testStaticConfig {
- t.Errorf("staticConfig not properly wired to derived manager")
- }
-
- derivedCfg := derived.manager.cfg
- if derivedCfg == nil {
- t.Fatalf("derived config is nil")
- }
-
- originalCfg := testManager.cfg
- if derivedCfg.Host != originalCfg.Host {
- t.Errorf("expected Host %s, got %s", originalCfg.Host, derivedCfg.Host)
- }
- if derivedCfg.APIPath != originalCfg.APIPath {
- t.Errorf("expected APIPath %s, got %s", originalCfg.APIPath, derivedCfg.APIPath)
- }
- if derivedCfg.QPS != originalCfg.QPS {
- t.Errorf("expected QPS %f, got %f", originalCfg.QPS, derivedCfg.QPS)
- }
- if derivedCfg.Burst != originalCfg.Burst {
- t.Errorf("expected Burst %d, got %d", originalCfg.Burst, derivedCfg.Burst)
- }
- if derivedCfg.Timeout != originalCfg.Timeout {
- t.Errorf("expected Timeout %v, got %v", originalCfg.Timeout, derivedCfg.Timeout)
- }
-
- if derivedCfg.Insecure != originalCfg.Insecure {
- t.Errorf("expected TLS Insecure %v, got %v", originalCfg.Insecure, derivedCfg.Insecure)
- }
- if derivedCfg.ServerName != originalCfg.ServerName {
- t.Errorf("expected TLS ServerName %s, got %s", originalCfg.ServerName, derivedCfg.ServerName)
- }
- if derivedCfg.CAFile != originalCfg.CAFile {
- t.Errorf("expected TLS CAFile %s, got %s", originalCfg.CAFile, derivedCfg.CAFile)
- }
- if string(derivedCfg.CAData) != string(originalCfg.CAData) {
- t.Errorf("expected TLS CAData %s, got %s", string(originalCfg.CAData), string(derivedCfg.CAData))
- }
-
- if derivedCfg.BearerToken != testBearerToken {
- t.Errorf("expected BearerToken %s, got %s", testBearerToken, derivedCfg.BearerToken)
- }
- if derivedCfg.UserAgent != CustomUserAgent {
- t.Errorf("expected UserAgent %s, got %s", CustomUserAgent, derivedCfg.UserAgent)
- }
-
- // Verify that sensitive fields are NOT copied to prevent credential leakage
- // The derived config should only use the bearer token from the Authorization header
- // and not inherit any authentication credentials from the original kubeconfig
- if derivedCfg.CertFile != "" {
- t.Errorf("expected TLS CertFile to be empty, got %s", derivedCfg.CertFile)
- }
- if derivedCfg.KeyFile != "" {
- t.Errorf("expected TLS KeyFile to be empty, got %s", derivedCfg.KeyFile)
- }
- if len(derivedCfg.CertData) != 0 {
- t.Errorf("expected TLS CertData to be empty, got %v", derivedCfg.CertData)
- }
- if len(derivedCfg.KeyData) != 0 {
- t.Errorf("expected TLS KeyData to be empty, got %v", derivedCfg.KeyData)
- }
-
- if derivedCfg.Username != "" {
- t.Errorf("expected Username to be empty, got %s", derivedCfg.Username)
- }
- if derivedCfg.Password != "" {
- t.Errorf("expected Password to be empty, got %s", derivedCfg.Password)
- }
- if derivedCfg.AuthProvider != nil {
- t.Errorf("expected AuthProvider to be nil, got %v", derivedCfg.AuthProvider)
- }
- if derivedCfg.ExecProvider != nil {
- t.Errorf("expected ExecProvider to be nil, got %v", derivedCfg.ExecProvider)
- }
- if derivedCfg.BearerTokenFile != "" {
- t.Errorf("expected BearerTokenFile to be empty, got %s", derivedCfg.BearerTokenFile)
- }
- if derivedCfg.Impersonate.UserName != "" {
- t.Errorf("expected Impersonate.UserName to be empty, got %s", derivedCfg.Impersonate.UserName)
- }
-
- // Verify that the original manager still has the sensitive data
- if originalCfg.Username == "" && originalCfg.Password == "" {
- t.Logf("original kubeconfig shouldn't be modified")
- }
-
- // Verify that the derived manager has proper clients initialized
- if derived.manager.accessControlClientSet == nil {
- t.Error("expected accessControlClientSet to be initialized")
- }
- if derived.manager.accessControlClientSet.staticConfig != testStaticConfig {
- t.Errorf("staticConfig not properly wired to derived manager")
- }
- if derived.manager.discoveryClient == nil {
- t.Error("expected discoveryClient to be initialized")
- }
- if derived.manager.accessControlRESTMapper == nil {
- t.Error("expected accessControlRESTMapper to be initialized")
- }
- if derived.manager.accessControlRESTMapper.staticConfig != testStaticConfig {
- t.Errorf("staticConfig not properly wired to derived manager")
- }
- if derived.manager.dynamicClient == nil {
- t.Error("expected dynamicClient to be initialized")
- }
- })
-
- t.Run("with RequireOAuth=true and no authorization header returns oauth token required error", func(t *testing.T) {
- testStaticConfig := &config.StaticConfig{
- KubeConfig: kubeconfigPath,
- RequireOAuth: true,
- DisabledTools: []string{"configuration_view"},
- DeniedResources: []config.GroupVersionKind{
- {Group: "apps", Version: "v1", Kind: "Deployment"},
- },
- }
-
- testManager, err := NewManager(testStaticConfig)
- if err != nil {
- t.Fatalf("failed to create manager: %v", err)
- }
- defer testManager.Close()
- ctx := context.Background()
- derived, err := testManager.Derived(ctx)
- if err == nil {
- t.Fatal("expected error for missing oauth token, got nil")
- }
- if err.Error() != "oauth token required" {
- t.Fatalf("expected error 'oauth token required', got %s", err.Error())
- }
- if derived != nil {
- t.Error("expected nil derived manager when oauth token required")
- }
- })
-
- t.Run("with RequireOAuth=true and invalid authorization header returns oauth token required error", func(t *testing.T) {
- testStaticConfig := &config.StaticConfig{
- KubeConfig: kubeconfigPath,
- RequireOAuth: true,
- DisabledTools: []string{"configuration_view"},
- DeniedResources: []config.GroupVersionKind{
- {Group: "apps", Version: "v1", Kind: "Deployment"},
- },
- }
-
- testManager, err := NewManager(testStaticConfig)
- if err != nil {
- t.Fatalf("failed to create manager: %v", err)
- }
- defer testManager.Close()
- ctx := context.WithValue(context.Background(), OAuthAuthorizationHeader, "invalid-token")
- derived, err := testManager.Derived(ctx)
- if err == nil {
- t.Fatal("expected error for invalid oauth token, got nil")
- }
- if err.Error() != "oauth token required" {
- t.Fatalf("expected error 'oauth token required', got %s", err.Error())
- }
- if derived != nil {
- t.Error("expected nil derived manager when oauth token required")
- }
- })
-
- t.Run("with RequireOAuth=true and valid bearer token creates derived manager", func(t *testing.T) {
- testStaticConfig := &config.StaticConfig{
- KubeConfig: kubeconfigPath,
- RequireOAuth: true,
- DisabledTools: []string{"configuration_view"},
- DeniedResources: []config.GroupVersionKind{
- {Group: "apps", Version: "v1", Kind: "Deployment"},
- },
- }
-
- testManager, err := NewManager(testStaticConfig)
- if err != nil {
- t.Fatalf("failed to create manager: %v", err)
- }
- defer testManager.Close()
- testBearerToken := "test-bearer-token-123"
- ctx := context.WithValue(context.Background(), OAuthAuthorizationHeader, "Bearer "+testBearerToken)
- derived, err := testManager.Derived(ctx)
- if err != nil {
- t.Fatalf("failed to create manager: %v", err)
- }
-
- if derived.manager == testManager {
- t.Error("expected new derived manager, got original manager")
- }
-
- if derived.manager.staticConfig != testStaticConfig {
- t.Error("staticConfig not properly wired to derived manager")
- }
-
- derivedCfg := derived.manager.cfg
- if derivedCfg == nil {
- t.Fatal("derived config is nil")
- }
-
- if derivedCfg.BearerToken != testBearerToken {
- t.Errorf("expected BearerToken %s, got %s", testBearerToken, derivedCfg.BearerToken)
- }
- })
-}
diff --git a/pkg/kubernetes/manager.go b/pkg/kubernetes/manager.go
new file mode 100644
index 00000000..d09b8790
--- /dev/null
+++ b/pkg/kubernetes/manager.go
@@ -0,0 +1,301 @@
+package kubernetes
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/containers/kubernetes-mcp-server/pkg/config"
+ "github.com/containers/kubernetes-mcp-server/pkg/helm"
+ "github.com/fsnotify/fsnotify"
+ authenticationv1api "k8s.io/api/authentication/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/discovery"
+ "k8s.io/client-go/discovery/cached/memory"
+ "k8s.io/client-go/dynamic"
+ "k8s.io/client-go/rest"
+ "k8s.io/client-go/restmapper"
+ "k8s.io/client-go/tools/clientcmd"
+ clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+ "k8s.io/klog/v2"
+)
+
+type Manager struct {
+ cfg *rest.Config
+ clientCmdConfig clientcmd.ClientConfig
+ discoveryClient discovery.CachedDiscoveryInterface
+ accessControlClientSet *AccessControlClientset
+ accessControlRESTMapper *AccessControlRESTMapper
+ dynamicClient *dynamic.DynamicClient
+
+ staticConfig *config.StaticConfig
+ CloseWatchKubeConfig CloseWatchKubeConfig
+}
+
+var _ helm.Kubernetes = (*Manager)(nil)
+var _ Openshift = (*Manager)(nil)
+
+var (
+ ErrorKubeconfigInClusterNotAllowed = errors.New("kubeconfig manager cannot be used in in-cluster deployments")
+ ErrorInClusterNotInCluster = errors.New("in-cluster manager cannot be used outside of a cluster")
+)
+
+func NewKubeconfigManager(config *config.StaticConfig, kubeconfigContext string) (*Manager, error) {
+ if IsInCluster(config) {
+ return nil, ErrorKubeconfigInClusterNotAllowed
+ }
+
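+ // Follow the standard kubeconfig loading rules (KUBECONFIG environment variable, ~/.kube/config) unless an explicit path is configured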
+ pathOptions := clientcmd.NewDefaultPathOptions()
+ if config.KubeConfig != "" {
+ pathOptions.LoadingRules.ExplicitPath = config.KubeConfig
+ }
+ clientCmdConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
+ pathOptions.LoadingRules,
+ &clientcmd.ConfigOverrides{
+ ClusterInfo: clientcmdapi.Cluster{Server: ""},
+ CurrentContext: kubeconfigContext,
+ })
+
+ restConfig, err := clientCmdConfig.ClientConfig()
+ if err != nil {
+ return nil, fmt.Errorf("failed to create kubernetes rest config from kubeconfig: %v", err)
+ }
+
+ return newManager(config, restConfig, clientCmdConfig)
+}
+
+func NewInClusterManager(config *config.StaticConfig) (*Manager, error) {
+ if config.KubeConfig != "" {
+ return nil, fmt.Errorf("kubeconfig file %s cannot be used with the in-cluster deployments: %v", config.KubeConfig, ErrorKubeconfigInClusterNotAllowed)
+ }
+
+ if !IsInCluster(config) {
+ return nil, ErrorInClusterNotInCluster
+ }
+
+ restConfig, err := InClusterConfig()
+ if err != nil {
+ return nil, fmt.Errorf("failed to create in-cluster kubernetes rest config: %v", err)
+ }
+
+ // Create a dummy clientcmdapi.Config for the in-cluster case, to be used wherever a clientcmd.ClientConfig is required
+ clientCmdConfig := clientcmdapi.NewConfig()
+ clientCmdConfig.Clusters["cluster"] = &clientcmdapi.Cluster{
+ Server: restConfig.Host,
+ InsecureSkipTLSVerify: restConfig.Insecure,
+ }
+ clientCmdConfig.AuthInfos["user"] = &clientcmdapi.AuthInfo{
+ Token: restConfig.BearerToken,
+ }
+ clientCmdConfig.Contexts[inClusterKubeConfigDefaultContext] = &clientcmdapi.Context{
+ Cluster: "cluster",
+ AuthInfo: "user",
+ }
+ clientCmdConfig.CurrentContext = inClusterKubeConfigDefaultContext
+
+ return newManager(config, restConfig, clientcmd.NewDefaultClientConfig(*clientCmdConfig, nil))
+}
+
+func newManager(config *config.StaticConfig, restConfig *rest.Config, clientCmdConfig clientcmd.ClientConfig) (*Manager, error) {
+ k8s := &Manager{
+ staticConfig: config,
+ cfg: restConfig,
+ clientCmdConfig: clientCmdConfig,
+ }
+ if k8s.cfg.UserAgent == "" {
+ k8s.cfg.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+ var err error
+ // TODO: Won't work because not all client-go clients use the shared context (e.g. discovery client uses context.TODO())
+ //k8s.cfg.Wrap(func(original http.RoundTripper) http.RoundTripper {
+ // return &impersonateRoundTripper{original}
+ //})
+ k8s.accessControlClientSet, err = NewAccessControlClientset(k8s.cfg, k8s.staticConfig)
+ if err != nil {
+ return nil, err
+ }
+ k8s.discoveryClient = memory.NewMemCacheClient(k8s.accessControlClientSet.DiscoveryClient())
+ k8s.accessControlRESTMapper = NewAccessControlRESTMapper(
+ restmapper.NewDeferredDiscoveryRESTMapper(k8s.discoveryClient),
+ k8s.staticConfig,
+ )
+ k8s.dynamicClient, err = dynamic.NewForConfig(k8s.cfg)
+ if err != nil {
+ return nil, err
+ }
+ return k8s, nil
+}
+
+func (m *Manager) WatchKubeConfig(onKubeConfigChange func() error) {
+ if m.clientCmdConfig == nil {
+ return
+ }
+ kubeConfigFiles := m.clientCmdConfig.ConfigAccess().GetLoadingPrecedence()
+ if len(kubeConfigFiles) == 0 {
+ return
+ }
+ watcher, err := fsnotify.NewWatcher()
+ if err != nil {
+ return
+ }
+ for _, file := range kubeConfigFiles {
+ _ = watcher.Add(file)
+ }
+ go func() {
+ for {
+ select {
+ case _, ok := <-watcher.Events:
+ if !ok {
+ return
+ }
+ _ = onKubeConfigChange()
+ case _, ok := <-watcher.Errors:
+ if !ok {
+ return
+ }
+ }
+ }
+ }()
+ if m.CloseWatchKubeConfig != nil {
+ _ = m.CloseWatchKubeConfig()
+ }
+ m.CloseWatchKubeConfig = watcher.Close
+}
+
+func (m *Manager) Close() {
+ if m.CloseWatchKubeConfig != nil {
+ _ = m.CloseWatchKubeConfig()
+ }
+}
+
+func (m *Manager) configuredNamespace() string {
+ if ns, _, nsErr := m.clientCmdConfig.Namespace(); nsErr == nil {
+ return ns
+ }
+ return ""
+}
+
+func (m *Manager) NamespaceOrDefault(namespace string) string {
+ if namespace == "" {
+ return m.configuredNamespace()
+ }
+ return namespace
+}
+
+func (m *Manager) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
+ return m.discoveryClient, nil
+}
+
+func (m *Manager) ToRESTMapper() (meta.RESTMapper, error) {
+ return m.accessControlRESTMapper, nil
+}
+
+// ToRESTConfig returns the rest.Config object (genericclioptions.RESTClientGetter)
+func (m *Manager) ToRESTConfig() (*rest.Config, error) {
+ return m.cfg, nil
+}
+
+// ToRawKubeConfigLoader returns the clientcmd.ClientConfig object (genericclioptions.RESTClientGetter)
+func (m *Manager) ToRawKubeConfigLoader() clientcmd.ClientConfig {
+ return m.clientCmdConfig
+}
+
+func (m *Manager) VerifyToken(ctx context.Context, token, audience string) (*authenticationv1api.UserInfo, []string, error) {
+ tokenReviewClient, err := m.accessControlClientSet.TokenReview()
+ if err != nil {
+ return nil, nil, err
+ }
+ tokenReview := &authenticationv1api.TokenReview{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: "authentication.k8s.io/v1",
+ Kind: "TokenReview",
+ },
+ Spec: authenticationv1api.TokenReviewSpec{
+ Token: token,
+ Audiences: []string{audience},
+ },
+ }
+
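+ // Ask the API server to authenticate the token against the requested audience via a TokenReview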
+ result, err := tokenReviewClient.Create(ctx, tokenReview, metav1.CreateOptions{})
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to create token review: %v", err)
+ }
+
+ if !result.Status.Authenticated {
+ if result.Status.Error != "" {
+ return nil, nil, fmt.Errorf("token authentication failed: %s", result.Status.Error)
+ }
+ return nil, nil, fmt.Errorf("token authentication failed")
+ }
+
+ return &result.Status.User, result.Status.Audiences, nil
+}
+
+func (m *Manager) Derived(ctx context.Context) (*Kubernetes, error) {
+ authorization, ok := ctx.Value(OAuthAuthorizationHeader).(string)
+ if !ok || !strings.HasPrefix(authorization, "Bearer ") {
+ if m.staticConfig.RequireOAuth {
+ return nil, errors.New("oauth token required")
+ }
+ return &Kubernetes{manager: m}, nil
+ }
+ klog.V(5).Infof("%s header found (Bearer), using provided bearer token", OAuthAuthorizationHeader)
+ derivedCfg := &rest.Config{
+ Host: m.cfg.Host,
+ APIPath: m.cfg.APIPath,
+ // Copy only server verification TLS settings (CA bundle and server name)
+ TLSClientConfig: rest.TLSClientConfig{
+ Insecure: m.cfg.Insecure,
+ ServerName: m.cfg.ServerName,
+ CAFile: m.cfg.CAFile,
+ CAData: m.cfg.CAData,
+ },
+ BearerToken: strings.TrimPrefix(authorization, "Bearer "),
+ // pass custom UserAgent to identify the client
+ UserAgent: CustomUserAgent,
+ QPS: m.cfg.QPS,
+ Burst: m.cfg.Burst,
+ Timeout: m.cfg.Timeout,
+ Impersonate: rest.ImpersonationConfig{},
+ }
+ clientCmdApiConfig, err := m.clientCmdConfig.RawConfig()
+ if err != nil {
+ if m.staticConfig.RequireOAuth {
+ klog.Errorf("failed to get kubeconfig: %v", err)
+ return nil, errors.New("failed to get kubeconfig")
+ }
+ return &Kubernetes{manager: m}, nil
+ }
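+ // Strip all user credentials from the kubeconfig copy; the derived client must authenticate solely with the provided bearer token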
+ clientCmdApiConfig.AuthInfos = make(map[string]*clientcmdapi.AuthInfo)
+ derived := &Kubernetes{
+ manager: &Manager{
+ clientCmdConfig: clientcmd.NewDefaultClientConfig(clientCmdApiConfig, nil),
+ cfg: derivedCfg,
+ staticConfig: m.staticConfig,
+ },
+ }
+ derived.manager.accessControlClientSet, err = NewAccessControlClientset(derived.manager.cfg, derived.manager.staticConfig)
+ if err != nil {
+ if m.staticConfig.RequireOAuth {
+ klog.Errorf("failed to get kubeconfig: %v", err)
+ return nil, errors.New("failed to get kubeconfig")
+ }
+ return &Kubernetes{manager: m}, nil
+ }
+ derived.manager.discoveryClient = memory.NewMemCacheClient(derived.manager.accessControlClientSet.DiscoveryClient())
+ derived.manager.accessControlRESTMapper = NewAccessControlRESTMapper(
+ restmapper.NewDeferredDiscoveryRESTMapper(derived.manager.discoveryClient),
+ derived.manager.staticConfig,
+ )
+ derived.manager.dynamicClient, err = dynamic.NewForConfig(derived.manager.cfg)
+ if err != nil {
+ if m.staticConfig.RequireOAuth {
+ klog.Errorf("failed to initialize dynamic client: %v", err)
+ return nil, errors.New("failed to initialize dynamic client")
+ }
+ return &Kubernetes{manager: m}, nil
+ }
+ return derived, nil
+}
diff --git a/pkg/kubernetes/manager_test.go b/pkg/kubernetes/manager_test.go
new file mode 100644
index 00000000..63241fa9
--- /dev/null
+++ b/pkg/kubernetes/manager_test.go
@@ -0,0 +1,202 @@
+package kubernetes
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+ "testing"
+
+ "github.com/containers/kubernetes-mcp-server/internal/test"
+ "github.com/containers/kubernetes-mcp-server/pkg/config"
+ "github.com/stretchr/testify/suite"
+ "k8s.io/client-go/rest"
+ clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+)
+
+type ManagerTestSuite struct {
+ suite.Suite
+ originalEnv []string
+ originalInClusterConfig func() (*rest.Config, error)
+ mockServer *test.MockServer
+}
+
+func (s *ManagerTestSuite) SetupTest() {
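+	// Snapshot the process environment and the InClusterConfig hook so each test can
+	// mutate them freely; TearDownTest restores both.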
+ s.originalEnv = os.Environ()
+ s.originalInClusterConfig = InClusterConfig
+ s.mockServer = test.NewMockServer()
+}
+
+func (s *ManagerTestSuite) TearDownTest() {
+ test.RestoreEnv(s.originalEnv)
+ InClusterConfig = s.originalInClusterConfig
+ if s.mockServer != nil {
+ s.mockServer.Close()
+ }
+}
+
+func (s *ManagerTestSuite) TestNewInClusterManager() {
+ s.Run("In cluster", func() {
+ InClusterConfig = func() (*rest.Config, error) {
+ return &rest.Config{}, nil
+ }
+ s.Run("with default StaticConfig (empty kubeconfig)", func() {
+ manager, err := NewInClusterManager(&config.StaticConfig{})
+ s.Require().NoError(err)
+ s.Require().NotNil(manager)
+ s.Run("behaves as in cluster", func() {
+ rawConfig, err := manager.clientCmdConfig.RawConfig()
+ s.Require().NoError(err)
+ s.Equal("in-cluster", rawConfig.CurrentContext, "expected current context to be 'in-cluster'")
+ })
+ s.Run("sets default user-agent", func() {
+ s.Contains(manager.cfg.UserAgent, "("+runtime.GOOS+"/"+runtime.GOARCH+")")
+ })
+ })
+ s.Run("with explicit kubeconfig", func() {
+ manager, err := NewInClusterManager(&config.StaticConfig{
+ KubeConfig: s.mockServer.KubeconfigFile(s.T()),
+ })
+ s.Run("returns error", func() {
+ s.Error(err)
+ s.Nil(manager)
+ s.Regexp("kubeconfig file .+ cannot be used with the in-cluster deployments", err.Error())
+ })
+ })
+ })
+ s.Run("Out of cluster", func() {
+ InClusterConfig = func() (*rest.Config, error) {
+ return nil, rest.ErrNotInCluster
+ }
+ manager, err := NewInClusterManager(&config.StaticConfig{})
+ s.Run("returns error", func() {
+ s.Error(err)
+ s.Nil(manager)
+ s.ErrorIs(err, ErrorInClusterNotInCluster)
+ s.ErrorContains(err, "in-cluster manager cannot be used outside of a cluster")
+ })
+ })
+}
+
+func (s *ManagerTestSuite) TestNewKubeconfigManager() {
+ s.Run("Out of cluster", func() {
+ InClusterConfig = func() (*rest.Config, error) {
+ return nil, rest.ErrNotInCluster
+ }
+ s.Run("with valid kubeconfig in env", func() {
+ kubeconfig := s.mockServer.KubeconfigFile(s.T())
+ s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfig))
+ manager, err := NewKubeconfigManager(&config.StaticConfig{}, "")
+ s.Require().NoError(err)
+ s.Require().NotNil(manager)
+ s.Run("behaves as NOT in cluster", func() {
+ rawConfig, err := manager.clientCmdConfig.RawConfig()
+ s.Require().NoError(err)
+ s.NotEqual("in-cluster", rawConfig.CurrentContext, "expected current context to NOT be 'in-cluster'")
+ s.Equal("fake-context", rawConfig.CurrentContext, "expected current context to be 'fake-context' as in kubeconfig")
+ })
+ s.Run("loads correct config", func() {
+ s.Contains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfig, "expected kubeconfig path to match")
+ })
+ s.Run("sets default user-agent", func() {
+ s.Contains(manager.cfg.UserAgent, "("+runtime.GOOS+"/"+runtime.GOARCH+")")
+ })
+ s.Run("rest config host points to mock server", func() {
+ s.Equal(s.mockServer.Config().Host, manager.cfg.Host, "expected rest config host to match mock server")
+ })
+ })
+ s.Run("with valid kubeconfig in env and explicit kubeconfig in config", func() {
+ kubeconfigInEnv := s.mockServer.KubeconfigFile(s.T())
+ s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigInEnv))
+ kubeconfigExplicit := s.mockServer.KubeconfigFile(s.T())
+ manager, err := NewKubeconfigManager(&config.StaticConfig{
+ KubeConfig: kubeconfigExplicit,
+ }, "")
+ s.Require().NoError(err)
+ s.Require().NotNil(manager)
+ s.Run("behaves as NOT in cluster", func() {
+ rawConfig, err := manager.clientCmdConfig.RawConfig()
+ s.Require().NoError(err)
+ s.NotEqual("in-cluster", rawConfig.CurrentContext, "expected current context to NOT be 'in-cluster'")
+ s.Equal("fake-context", rawConfig.CurrentContext, "expected current context to be 'fake-context' as in kubeconfig")
+ })
+ s.Run("loads correct config (explicit)", func() {
+ s.NotContains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfigInEnv, "expected kubeconfig path to NOT match env")
+ s.Contains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfigExplicit, "expected kubeconfig path to match explicit")
+ })
+ s.Run("rest config host points to mock server", func() {
+ s.Equal(s.mockServer.Config().Host, manager.cfg.Host, "expected rest config host to match mock server")
+ })
+ })
+ s.Run("with valid kubeconfig in env and explicit kubeconfig context (valid)", func() {
+ kubeconfig := s.mockServer.Kubeconfig()
+ kubeconfig.Contexts["not-the-mock-server"] = clientcmdapi.NewContext()
+ kubeconfig.Contexts["not-the-mock-server"].Cluster = "not-the-mock-server"
+ kubeconfig.Clusters["not-the-mock-server"] = clientcmdapi.NewCluster()
+ kubeconfig.Clusters["not-the-mock-server"].Server = "https://not-the-mock-server:6443" // REST configuration should point to mock server, not this
+ kubeconfig.CurrentContext = "not-the-mock-server"
+ kubeconfigFile := test.KubeconfigFile(s.T(), kubeconfig)
+ s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigFile))
+ manager, err := NewKubeconfigManager(&config.StaticConfig{}, "fake-context") // fake-context is the one mock-server serves
+ s.Require().NoError(err)
+ s.Require().NotNil(manager)
+ s.Run("behaves as NOT in cluster", func() {
+ rawConfig, err := manager.clientCmdConfig.RawConfig()
+ s.Require().NoError(err)
+ s.NotEqual("in-cluster", rawConfig.CurrentContext, "expected current context to NOT be 'in-cluster'")
+ s.Equal("not-the-mock-server", rawConfig.CurrentContext, "expected current context to be 'not-the-mock-server' as in explicit context")
+ })
+ s.Run("loads correct config", func() {
+ s.Contains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfigFile, "expected kubeconfig path to match")
+ })
+ s.Run("rest config host points to mock server", func() {
+ s.Equal(s.mockServer.Config().Host, manager.cfg.Host, "expected rest config host to match mock server")
+ })
+ })
+ s.Run("with valid kubeconfig in env and explicit kubeconfig context (invalid)", func() {
+ kubeconfigInEnv := s.mockServer.KubeconfigFile(s.T())
+ s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigInEnv))
+ manager, err := NewKubeconfigManager(&config.StaticConfig{}, "i-do-not-exist")
+ s.Run("returns error", func() {
+ s.Error(err)
+ s.Nil(manager)
+ s.ErrorContains(err, `failed to create kubernetes rest config from kubeconfig: context "i-do-not-exist" does not exist`)
+ })
+ })
+ s.Run("with invalid path kubeconfig in env", func() {
+ s.Require().NoError(os.Setenv("KUBECONFIG", "i-dont-exist"))
+ manager, err := NewKubeconfigManager(&config.StaticConfig{}, "")
+ s.Run("returns error", func() {
+ s.Error(err)
+ s.Nil(manager)
+ s.ErrorContains(err, "failed to create kubernetes rest config")
+ })
+ })
+ s.Run("with empty kubeconfig in env", func() {
+ kubeconfigPath := filepath.Join(s.T().TempDir(), "config")
+ s.Require().NoError(os.WriteFile(kubeconfigPath, []byte(""), 0644))
+ s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigPath))
+ manager, err := NewKubeconfigManager(&config.StaticConfig{}, "")
+ s.Run("returns error", func() {
+ s.Error(err)
+ s.Nil(manager)
+ s.ErrorContains(err, "no configuration has been provided")
+ })
+ })
+ })
+ s.Run("In cluster", func() {
+ InClusterConfig = func() (*rest.Config, error) {
+ return &rest.Config{}, nil
+ }
+ manager, err := NewKubeconfigManager(&config.StaticConfig{}, "")
+ s.Run("returns error", func() {
+ s.Error(err)
+ s.Nil(manager)
+ s.ErrorIs(err, ErrorKubeconfigInClusterNotAllowed)
+ s.ErrorContains(err, "kubeconfig manager cannot be used in in-cluster deployments")
+ })
+ })
+}
+
+func TestManager(t *testing.T) {
+ suite.Run(t, new(ManagerTestSuite))
+}
diff --git a/pkg/kubernetes/nodes.go b/pkg/kubernetes/nodes.go
new file mode 100644
index 00000000..a4321a9f
--- /dev/null
+++ b/pkg/kubernetes/nodes.go
@@ -0,0 +1,79 @@
+package kubernetes
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/metrics/pkg/apis/metrics"
+ metricsv1beta1api "k8s.io/metrics/pkg/apis/metrics/v1beta1"
+)
+
+func (k *Kubernetes) NodesLog(ctx context.Context, name string, query string, tailLines int64) (string, error) {
+ // Use the node proxy API to access logs from the kubelet
+ // https://kubernetes.io/docs/concepts/cluster-administration/system-logs/#log-query
+ // Common log paths:
+ // - /var/log/kubelet.log - kubelet logs
+ // - /var/log/kube-proxy.log - kube-proxy logs
+ // - /var/log/containers/ - container logs
+
+ req, err := k.AccessControlClientset().NodesLogs(ctx, name)
+ if err != nil {
+ return "", err
+ }
+
+ req.Param("query", query)
+	// Limit the number of returned log lines when requested
+ if tailLines > 0 {
+ req.Param("tailLines", fmt.Sprintf("%d", tailLines))
+ }
+
+ result := req.Do(ctx)
+ if result.Error() != nil {
+ return "", fmt.Errorf("failed to get node logs: %w", result.Error())
+ }
+
+ rawData, err := result.Raw()
+ if err != nil {
+ return "", fmt.Errorf("failed to read node log response: %w", err)
+ }
+
+ return string(rawData), nil
+}
+
+func (k *Kubernetes) NodesStatsSummary(ctx context.Context, name string) (string, error) {
+ // Use the node proxy API to access stats summary from the kubelet
+ // https://kubernetes.io/docs/reference/instrumentation/understand-psi-metrics/
+ // This endpoint provides CPU, memory, filesystem, and network statistics
+
+ req, err := k.AccessControlClientset().NodesStatsSummary(ctx, name)
+ if err != nil {
+ return "", err
+ }
+
+ result := req.Do(ctx)
+ if result.Error() != nil {
+ return "", fmt.Errorf("failed to get node stats summary: %w", result.Error())
+ }
+
+ rawData, err := result.Raw()
+ if err != nil {
+ return "", fmt.Errorf("failed to read node stats summary response: %w", err)
+ }
+
+ return string(rawData), nil
+}
+
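+// NodesTopOptions configures NodesTop requests. Name optionally limits the
+// query to a single node; ListOptions applies the usual list filtering.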
+type NodesTopOptions struct {
+ metav1.ListOptions
+ Name string
+}
+
+func (k *Kubernetes) NodesTop(ctx context.Context, options NodesTopOptions) (*metrics.NodeMetricsList, error) {
+	// TODO: consider moving this check to the MCP tools setup and omitting the tool when the metrics API isn't available in the target cluster
+ if !k.supportsGroupVersion(metrics.GroupName + "/" + metricsv1beta1api.SchemeGroupVersion.Version) {
+ return nil, errors.New("metrics API is not available")
+ }
+ return k.manager.accessControlClientSet.NodesMetricses(ctx, options.Name, options.ListOptions)
+}
diff --git a/pkg/kubernetes/provider.go b/pkg/kubernetes/provider.go
index 6ba0034b..092c7de8 100644
--- a/pkg/kubernetes/provider.go
+++ b/pkg/kubernetes/provider.go
@@ -4,93 +4,45 @@ import (
"context"
"github.com/containers/kubernetes-mcp-server/pkg/config"
- "k8s.io/client-go/discovery/cached/memory"
- "k8s.io/client-go/dynamic"
- "k8s.io/client-go/rest"
- "k8s.io/client-go/restmapper"
- "k8s.io/client-go/tools/clientcmd"
)
-type ManagerProvider interface {
+type Provider interface {
+	// Openshift is embedded to expose OpenShift-specific functionality to toolset providers.
+	// TODO: with the configurable toolset implementation, and especially the multi-cluster approach,
+	// extending this interface might no longer be a good idea.
+	// In the kubeconfig multi-context case, a user might be targeting both an OpenShift-flavored cluster and a vanilla Kubernetes cluster.
+	// See: https://github.com/containers/kubernetes-mcp-server/pull/372#discussion_r2421592315
+ Openshift
+ TokenVerifier
GetTargets(ctx context.Context) ([]string, error)
- GetManagerFor(ctx context.Context, target string) (*Manager, error)
+ GetDerivedKubernetes(ctx context.Context, target string) (*Kubernetes, error)
GetDefaultTarget() string
GetTargetParameterName() string
WatchTargets(func() error)
Close()
}
-func NewManagerProvider(cfg *config.StaticConfig) (ManagerProvider, error) {
- m, err := NewManager(cfg)
- if err != nil {
- return nil, err
- }
-
- strategy := resolveStrategy(cfg, m)
+func NewProvider(cfg *config.StaticConfig) (Provider, error) {
+ strategy := resolveStrategy(cfg)
factory, err := getProviderFactory(strategy)
if err != nil {
return nil, err
}
- return factory(m, cfg)
+ return factory(cfg)
}
-func (m *Manager) newForContext(context string) (*Manager, error) {
- pathOptions := clientcmd.NewDefaultPathOptions()
- if m.staticConfig.KubeConfig != "" {
- pathOptions.LoadingRules.ExplicitPath = m.staticConfig.KubeConfig
- }
-
- clientCmdConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
- pathOptions.LoadingRules,
- &clientcmd.ConfigOverrides{
- CurrentContext: context,
- },
- )
-
- cfg, err := clientCmdConfig.ClientConfig()
- if err != nil {
- return nil, err
- }
-
- if cfg.UserAgent == "" {
- cfg.UserAgent = rest.DefaultKubernetesUserAgent()
- }
-
- manager := &Manager{
- cfg: cfg,
- clientCmdConfig: clientCmdConfig,
- staticConfig: m.staticConfig,
- }
-
- // Initialize clients for new manager
- manager.accessControlClientSet, err = NewAccessControlClientset(manager.cfg, manager.staticConfig)
- if err != nil {
- return nil, err
- }
-
- manager.discoveryClient = memory.NewMemCacheClient(manager.accessControlClientSet.DiscoveryClient())
-
- manager.accessControlRESTMapper = NewAccessControlRESTMapper(
- restmapper.NewDeferredDiscoveryRESTMapper(manager.discoveryClient),
- manager.staticConfig,
- )
-
- manager.dynamicClient, err = dynamic.NewForConfig(manager.cfg)
- if err != nil {
- return nil, err
- }
-
- return manager, nil
-}
-
-func resolveStrategy(cfg *config.StaticConfig, m *Manager) string {
+func resolveStrategy(cfg *config.StaticConfig) string {
if cfg.ClusterProviderStrategy != "" {
return cfg.ClusterProviderStrategy
}
- if m.IsInCluster() {
+ if cfg.KubeConfig != "" {
+ return config.ClusterProviderKubeConfig
+ }
+
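+	// No explicit strategy and no explicit kubeconfig: fall back to in-cluster detection.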
+ if _, inClusterConfigErr := InClusterConfig(); inClusterConfigErr == nil {
return config.ClusterProviderInCluster
}
diff --git a/pkg/kubernetes/provider_kubeconfig.go b/pkg/kubernetes/provider_kubeconfig.go
index 1da46a58..9ab055c8 100644
--- a/pkg/kubernetes/provider_kubeconfig.go
+++ b/pkg/kubernetes/provider_kubeconfig.go
@@ -2,16 +2,18 @@ package kubernetes
import (
"context"
+ "errors"
"fmt"
"github.com/containers/kubernetes-mcp-server/pkg/config"
+ authenticationv1api "k8s.io/api/authentication/v1"
)
// KubeConfigTargetParameterName is the parameter name used to specify
// the kubeconfig context when using the kubeconfig cluster provider strategy.
const KubeConfigTargetParameterName = "context"
-// kubeConfigClusterProvider implements ManagerProvider for managing multiple
+// kubeConfigClusterProvider implements Provider for managing multiple
// Kubernetes clusters using different contexts from a kubeconfig file.
// It lazily initializes managers for each context as they are requested.
type kubeConfigClusterProvider struct {
@@ -19,18 +21,23 @@ type kubeConfigClusterProvider struct {
managers map[string]*Manager
}
-var _ ManagerProvider = &kubeConfigClusterProvider{}
+var _ Provider = &kubeConfigClusterProvider{}
func init() {
RegisterProvider(config.ClusterProviderKubeConfig, newKubeConfigClusterProvider)
}
// newKubeConfigClusterProvider creates a provider that manages multiple clusters
-// via kubeconfig contexts. Returns an error if the manager is in-cluster mode.
-func newKubeConfigClusterProvider(m *Manager, cfg *config.StaticConfig) (ManagerProvider, error) {
- // Handle in-cluster mode
- if m.IsInCluster() {
- return nil, fmt.Errorf("kubeconfig ClusterProviderStrategy is invalid for in-cluster deployments")
+// via kubeconfig contexts.
+// Internally, it leverages a KubeconfigManager for each context, initializing them
+// lazily when requested.
+func newKubeConfigClusterProvider(cfg *config.StaticConfig) (Provider, error) {
+ m, err := NewKubeconfigManager(cfg, "")
+ if err != nil {
+ if errors.Is(err, ErrorKubeconfigInClusterNotAllowed) {
+ return nil, fmt.Errorf("kubeconfig ClusterProviderStrategy is invalid for in-cluster deployments: %v", err)
+ }
+ return nil, err
}
rawConfig, err := m.clientCmdConfig.RawConfig()
@@ -56,54 +63,69 @@ func newKubeConfigClusterProvider(m *Manager, cfg *config.StaticConfig) (Manager
}, nil
}
-func (k *kubeConfigClusterProvider) GetTargets(ctx context.Context) ([]string, error) {
- contextNames := make([]string, 0, len(k.managers))
- for cluster := range k.managers {
- contextNames = append(contextNames, cluster)
+func (p *kubeConfigClusterProvider) managerForContext(context string) (*Manager, error) {
+ m, ok := p.managers[context]
+ if ok && m != nil {
+ return m, nil
}
- return contextNames, nil
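+	// Lazily create a manager for the requested context and cache it,
+	// reusing the default manager's static configuration.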
+ baseManager := p.managers[p.defaultContext]
+
+ m, err := NewKubeconfigManager(baseManager.staticConfig, context)
+ if err != nil {
+ return nil, err
+ }
+
+ p.managers[context] = m
+
+ return m, nil
}
-func (k *kubeConfigClusterProvider) GetTargetParameterName() string {
- return KubeConfigTargetParameterName
+func (p *kubeConfigClusterProvider) IsOpenShift(ctx context.Context) bool {
+ return p.managers[p.defaultContext].IsOpenShift(ctx)
}
-func (k *kubeConfigClusterProvider) GetManagerFor(ctx context.Context, context string) (*Manager, error) {
- m, ok := k.managers[context]
- if ok && m != nil {
- return m, nil
+func (p *kubeConfigClusterProvider) VerifyToken(ctx context.Context, context, token, audience string) (*authenticationv1api.UserInfo, []string, error) {
+ m, err := p.managerForContext(context)
+ if err != nil {
+ return nil, nil, err
}
+ return m.VerifyToken(ctx, token, audience)
+}
- baseManager := k.managers[k.defaultContext]
-
- if baseManager.IsInCluster() {
- // In cluster mode, so context switching is not applicable
- return baseManager, nil
+func (p *kubeConfigClusterProvider) GetTargets(_ context.Context) ([]string, error) {
+ contextNames := make([]string, 0, len(p.managers))
+ for contextName := range p.managers {
+ contextNames = append(contextNames, contextName)
}
- m, err := baseManager.newForContext(context)
+ return contextNames, nil
+}
+
+func (p *kubeConfigClusterProvider) GetTargetParameterName() string {
+ return KubeConfigTargetParameterName
+}
+
+func (p *kubeConfigClusterProvider) GetDerivedKubernetes(ctx context.Context, context string) (*Kubernetes, error) {
+ m, err := p.managerForContext(context)
if err != nil {
return nil, err
}
-
- k.managers[context] = m
-
- return m, nil
+ return m.Derived(ctx)
}
-func (k *kubeConfigClusterProvider) GetDefaultTarget() string {
- return k.defaultContext
+func (p *kubeConfigClusterProvider) GetDefaultTarget() string {
+ return p.defaultContext
}
-func (k *kubeConfigClusterProvider) WatchTargets(onKubeConfigChanged func() error) {
- m := k.managers[k.defaultContext]
+func (p *kubeConfigClusterProvider) WatchTargets(onKubeConfigChanged func() error) {
+ m := p.managers[p.defaultContext]
m.WatchKubeConfig(onKubeConfigChanged)
}
-func (k *kubeConfigClusterProvider) Close() {
- m := k.managers[k.defaultContext]
+func (p *kubeConfigClusterProvider) Close() {
+ m := p.managers[p.defaultContext]
m.Close()
}
diff --git a/pkg/kubernetes/provider_kubeconfig_test.go b/pkg/kubernetes/provider_kubeconfig_test.go
new file mode 100644
index 00000000..33ba60d6
--- /dev/null
+++ b/pkg/kubernetes/provider_kubeconfig_test.go
@@ -0,0 +1,133 @@
+package kubernetes
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/containers/kubernetes-mcp-server/internal/test"
+ "github.com/containers/kubernetes-mcp-server/pkg/config"
+ "github.com/stretchr/testify/suite"
+ clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+)
+
+type ProviderKubeconfigTestSuite struct {
+ BaseProviderSuite
+ mockServer *test.MockServer
+ provider Provider
+}
+
+func (s *ProviderKubeconfigTestSuite) SetupTest() {
+ // Kubeconfig provider is used when the multi-cluster feature is enabled with the kubeconfig strategy.
+ // For this test suite we simulate a kubeconfig with multiple contexts.
+ s.mockServer = test.NewMockServer()
+ kubeconfig := s.mockServer.Kubeconfig()
+ for i := 0; i < 10; i++ {
+ // Add multiple fake contexts to force multi-cluster behavior
+ kubeconfig.Contexts[fmt.Sprintf("context-%d", i)] = clientcmdapi.NewContext()
+ }
+ provider, err := NewProvider(&config.StaticConfig{KubeConfig: test.KubeconfigFile(s.T(), kubeconfig)})
+ s.Require().NoError(err, "Expected no error creating provider with kubeconfig")
+ s.provider = provider
+}
+
+func (s *ProviderKubeconfigTestSuite) TearDownTest() {
+ if s.mockServer != nil {
+ s.mockServer.Close()
+ }
+}
+
+func (s *ProviderKubeconfigTestSuite) TestType() {
+ s.IsType(&kubeConfigClusterProvider{}, s.provider)
+}
+
+func (s *ProviderKubeconfigTestSuite) TestWithNonOpenShiftCluster() {
+ s.Run("IsOpenShift returns false", func() {
+ inOpenShift := s.provider.IsOpenShift(s.T().Context())
+		s.False(inOpenShift, "Expected IsOpenShift to return false")
+ })
+}
+
+func (s *ProviderKubeconfigTestSuite) TestWithOpenShiftCluster() {
+ s.mockServer.Handle(&test.InOpenShiftHandler{})
+ s.Run("IsOpenShift returns true", func() {
+ inOpenShift := s.provider.IsOpenShift(s.T().Context())
+		s.True(inOpenShift, "Expected IsOpenShift to return true")
+ })
+}
+
+func (s *ProviderKubeconfigTestSuite) TestVerifyToken() {
+ s.mockServer.Handle(&test.TokenReviewHandler{})
+
+ s.Run("VerifyToken returns UserInfo for non-empty context", func() {
+ userInfo, audiences, err := s.provider.VerifyToken(s.T().Context(), "fake-context", "some-token", "the-audience")
+		s.Require().NoError(err, "Expected no error from VerifyToken with non-empty context")
+		s.Require().NotNil(userInfo, "Expected UserInfo from VerifyToken with non-empty context")
+		s.Equalf("test-user", userInfo.Username, "Expected username test-user, got: %s", userInfo.Username)
+		s.Containsf(userInfo.Groups, "system:authenticated", "Expected group system:authenticated in %v", userInfo.Groups)
+		s.Require().NotNil(audiences, "Expected audiences from VerifyToken with non-empty context")
+		s.Len(audiences, 1, "Expected exactly one audience from VerifyToken with non-empty context")
+ s.Containsf(audiences, "the-audience", "Expected audience the-audience in %v", audiences)
+ })
+ s.Run("VerifyToken returns UserInfo for empty context (default context)", func() {
+ userInfo, audiences, err := s.provider.VerifyToken(s.T().Context(), "", "the-token", "the-audience")
+		s.Require().NoError(err, "Expected no error from VerifyToken with empty context")
+		s.Require().NotNil(userInfo, "Expected UserInfo from VerifyToken with empty context")
+		s.Equalf("test-user", userInfo.Username, "Expected username test-user, got: %s", userInfo.Username)
+		s.Containsf(userInfo.Groups, "system:authenticated", "Expected group system:authenticated in %v", userInfo.Groups)
+		s.Require().NotNil(audiences, "Expected audiences from VerifyToken with empty context")
+		s.Len(audiences, 1, "Expected exactly one audience from VerifyToken with empty context")
+ s.Containsf(audiences, "the-audience", "Expected audience the-audience in %v", audiences)
+ })
+ s.Run("VerifyToken returns error for invalid context", func() {
+ userInfo, audiences, err := s.provider.VerifyToken(s.T().Context(), "invalid-context", "some-token", "the-audience")
+ s.Require().Error(err, "Expected error from VerifyToken with invalid target")
+ s.ErrorContainsf(err, `context "invalid-context" does not exist`, "Expected context does not exist error, got: %v", err)
+ s.Nil(userInfo, "Expected no UserInfo from VerifyToken with invalid target")
+ s.Nil(audiences, "Expected no audiences from VerifyToken with invalid target")
+ })
+}
+
+func (s *ProviderKubeconfigTestSuite) TestGetTargets() {
+ s.Run("GetTargets returns all contexts defined in kubeconfig", func() {
+ targets, err := s.provider.GetTargets(s.T().Context())
+ s.Require().NoError(err, "Expected no error from GetTargets")
+ s.Len(targets, 11, "Expected 11 targets from GetTargets")
+ s.Contains(targets, "fake-context", "Expected fake-context in targets from GetTargets")
+ for i := 0; i < 10; i++ {
+ s.Contains(targets, fmt.Sprintf("context-%d", i), "Expected context-%d in targets from GetTargets", i)
+ }
+ })
+}
+
+func (s *ProviderKubeconfigTestSuite) TestGetDerivedKubernetes() {
+ s.Run("GetDerivedKubernetes returns Kubernetes for valid context", func() {
+ k8s, err := s.provider.GetDerivedKubernetes(s.T().Context(), "fake-context")
+ s.Require().NoError(err, "Expected no error from GetDerivedKubernetes with valid context")
+ s.NotNil(k8s, "Expected Kubernetes from GetDerivedKubernetes with valid context")
+ })
+ s.Run("GetDerivedKubernetes returns Kubernetes for empty context (default)", func() {
+ k8s, err := s.provider.GetDerivedKubernetes(s.T().Context(), "")
+ s.Require().NoError(err, "Expected no error from GetDerivedKubernetes with empty context")
+ s.NotNil(k8s, "Expected Kubernetes from GetDerivedKubernetes with empty context")
+ })
+ s.Run("GetDerivedKubernetes returns error for invalid context", func() {
+ k8s, err := s.provider.GetDerivedKubernetes(s.T().Context(), "invalid-context")
+ s.Require().Error(err, "Expected error from GetDerivedKubernetes with invalid context")
+ s.ErrorContainsf(err, `context "invalid-context" does not exist`, "Expected context does not exist error, got: %v", err)
+ s.Nil(k8s, "Expected no Kubernetes from GetDerivedKubernetes with invalid context")
+ })
+}
+
+func (s *ProviderKubeconfigTestSuite) TestGetDefaultTarget() {
+ s.Run("GetDefaultTarget returns current-context defined in kubeconfig", func() {
+ s.Equal("fake-context", s.provider.GetDefaultTarget(), "Expected fake-context as default target")
+ })
+}
+
+func (s *ProviderKubeconfigTestSuite) TestGetTargetParameterName() {
+ s.Equal("context", s.provider.GetTargetParameterName(), "Expected context as target parameter name")
+}
+
+func TestProviderKubeconfig(t *testing.T) {
+ suite.Run(t, new(ProviderKubeconfigTestSuite))
+}
diff --git a/pkg/kubernetes/provider_registry.go b/pkg/kubernetes/provider_registry.go
index 67fa79b5..b9077f15 100644
--- a/pkg/kubernetes/provider_registry.go
+++ b/pkg/kubernetes/provider_registry.go
@@ -7,10 +7,10 @@ import (
"github.com/containers/kubernetes-mcp-server/pkg/config"
)
-// ProviderFactory creates a new ManagerProvider instance for a given strategy.
+// ProviderFactory creates a new Provider instance for a given strategy.
-// Implementations should validate that the Manager is compatible with their strategy
-// (e.g., kubeconfig provider should reject in-cluster managers).
+// Implementations should validate that the configuration is compatible with their
+// strategy (e.g., the kubeconfig provider rejects in-cluster deployments).
-type ProviderFactory func(m *Manager, cfg *config.StaticConfig) (ManagerProvider, error)
+type ProviderFactory func(cfg *config.StaticConfig) (Provider, error)
var providerFactories = make(map[string]ProviderFactory)
diff --git a/pkg/kubernetes/provider_registry_test.go b/pkg/kubernetes/provider_registry_test.go
index e52fbdfd..c94e1ec1 100644
--- a/pkg/kubernetes/provider_registry_test.go
+++ b/pkg/kubernetes/provider_registry_test.go
@@ -13,18 +13,18 @@ type ProviderRegistryTestSuite struct {
func (s *ProviderRegistryTestSuite) TestRegisterProvider() {
s.Run("With no pre-existing provider, registers the provider", func() {
- RegisterProvider("test-strategy", func(m *Manager, cfg *config.StaticConfig) (ManagerProvider, error) {
+ RegisterProvider("test-strategy", func(cfg *config.StaticConfig) (Provider, error) {
return nil, nil
})
_, exists := providerFactories["test-strategy"]
s.True(exists, "Provider should be registered")
})
s.Run("With pre-existing provider, panics", func() {
- RegisterProvider("test-pre-existent", func(m *Manager, cfg *config.StaticConfig) (ManagerProvider, error) {
+ RegisterProvider("test-pre-existent", func(cfg *config.StaticConfig) (Provider, error) {
return nil, nil
})
s.Panics(func() {
- RegisterProvider("test-pre-existent", func(m *Manager, cfg *config.StaticConfig) (ManagerProvider, error) {
+ RegisterProvider("test-pre-existent", func(cfg *config.StaticConfig) (Provider, error) {
return nil, nil
})
}, "Registering a provider with an existing strategy should panic")
@@ -39,10 +39,10 @@ func (s *ProviderRegistryTestSuite) TestGetRegisteredStrategies() {
})
s.Run("With multiple registered providers, returns sorted list", func() {
providerFactories = make(map[string]ProviderFactory)
- RegisterProvider("foo-strategy", func(m *Manager, cfg *config.StaticConfig) (ManagerProvider, error) {
+ RegisterProvider("foo-strategy", func(cfg *config.StaticConfig) (Provider, error) {
return nil, nil
})
- RegisterProvider("bar-strategy", func(m *Manager, cfg *config.StaticConfig) (ManagerProvider, error) {
+ RegisterProvider("bar-strategy", func(cfg *config.StaticConfig) (Provider, error) {
return nil, nil
})
strategies := GetRegisteredStrategies()
diff --git a/pkg/kubernetes/provider_single.go b/pkg/kubernetes/provider_single.go
index fe91f2a0..3693d639 100644
--- a/pkg/kubernetes/provider_single.go
+++ b/pkg/kubernetes/provider_single.go
@@ -2,12 +2,14 @@ package kubernetes
import (
"context"
+ "errors"
"fmt"
"github.com/containers/kubernetes-mcp-server/pkg/config"
+ authenticationv1api "k8s.io/api/authentication/v1"
)
-// singleClusterProvider implements ManagerProvider for managing a single
+// singleClusterProvider implements Provider for managing a single
// Kubernetes cluster. Used for in-cluster deployments or when multi-cluster
// support is disabled.
type singleClusterProvider struct {
@@ -15,7 +17,7 @@ type singleClusterProvider struct {
manager *Manager
}
-var _ ManagerProvider = &singleClusterProvider{}
+var _ Provider = &singleClusterProvider{}
func init() {
RegisterProvider(config.ClusterProviderInCluster, newSingleClusterProvider(config.ClusterProviderInCluster))
@@ -23,11 +25,26 @@ func init() {
}
// newSingleClusterProvider creates a provider that manages a single cluster.
-// Validates that the manager is in-cluster when the in-cluster strategy is used.
+// When used within a cluster or with an 'in-cluster' strategy, it uses an InClusterManager.
+// Otherwise, it uses a KubeconfigManager.
func newSingleClusterProvider(strategy string) ProviderFactory {
- return func(m *Manager, cfg *config.StaticConfig) (ManagerProvider, error) {
- if strategy == config.ClusterProviderInCluster && !m.IsInCluster() {
- return nil, fmt.Errorf("server must be deployed in cluster for the in-cluster ClusterProviderStrategy")
+ return func(cfg *config.StaticConfig) (Provider, error) {
+ if cfg != nil && cfg.KubeConfig != "" && strategy == config.ClusterProviderInCluster {
+ return nil, fmt.Errorf("kubeconfig file %s cannot be used with the in-cluster ClusterProviderStrategy", cfg.KubeConfig)
+ }
+
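+		// Pick the manager flavor: in-cluster when the strategy demands it or the
+		// environment provides it, kubeconfig otherwise.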
+ var m *Manager
+ var err error
+ if strategy == config.ClusterProviderInCluster || IsInCluster(cfg) {
+ m, err = NewInClusterManager(cfg)
+ } else {
+ m, err = NewKubeconfigManager(cfg, "")
+ }
+ if err != nil {
+ if errors.Is(err, ErrorInClusterNotInCluster) {
+ return nil, fmt.Errorf("server must be deployed in cluster for the %s ClusterProviderStrategy: %v", strategy, err)
+ }
+ return nil, err
}
return &singleClusterProvider{
@@ -37,30 +54,41 @@ func newSingleClusterProvider(strategy string) ProviderFactory {
}
}
-func (s *singleClusterProvider) GetTargets(ctx context.Context) ([]string, error) {
+func (p *singleClusterProvider) IsOpenShift(ctx context.Context) bool {
+ return p.manager.IsOpenShift(ctx)
+}
+
+func (p *singleClusterProvider) VerifyToken(ctx context.Context, target, token, audience string) (*authenticationv1api.UserInfo, []string, error) {
+ if target != "" {
+ return nil, nil, fmt.Errorf("unable to get manager for other context/cluster with %s strategy", p.strategy)
+ }
+ return p.manager.VerifyToken(ctx, token, audience)
+}
+
+func (p *singleClusterProvider) GetTargets(_ context.Context) ([]string, error) {
return []string{""}, nil
}
-func (s *singleClusterProvider) GetManagerFor(ctx context.Context, target string) (*Manager, error) {
+func (p *singleClusterProvider) GetDerivedKubernetes(ctx context.Context, target string) (*Kubernetes, error) {
if target != "" {
- return nil, fmt.Errorf("unable to get manager for other context/cluster with %s strategy", s.strategy)
+ return nil, fmt.Errorf("unable to get manager for other context/cluster with %s strategy", p.strategy)
}
- return s.manager, nil
+ return p.manager.Derived(ctx)
}
-func (s *singleClusterProvider) GetDefaultTarget() string {
+func (p *singleClusterProvider) GetDefaultTarget() string {
return ""
}
-func (s *singleClusterProvider) GetTargetParameterName() string {
+func (p *singleClusterProvider) GetTargetParameterName() string {
return ""
}
-func (s *singleClusterProvider) WatchTargets(watch func() error) {
- s.manager.WatchKubeConfig(watch)
+func (p *singleClusterProvider) WatchTargets(watch func() error) {
+ p.manager.WatchKubeConfig(watch)
}
-func (s *singleClusterProvider) Close() {
- s.manager.Close()
+func (p *singleClusterProvider) Close() {
+ p.manager.Close()
}
diff --git a/pkg/kubernetes/provider_single_test.go b/pkg/kubernetes/provider_single_test.go
new file mode 100644
index 00000000..150926b4
--- /dev/null
+++ b/pkg/kubernetes/provider_single_test.go
@@ -0,0 +1,116 @@
+package kubernetes
+
+import (
+ "testing"
+
+ "github.com/containers/kubernetes-mcp-server/internal/test"
+ "github.com/containers/kubernetes-mcp-server/pkg/config"
+ "github.com/stretchr/testify/suite"
+ "k8s.io/client-go/rest"
+)
+
+type ProviderSingleTestSuite struct {
+ BaseProviderSuite
+ mockServer *test.MockServer
+	originalInClusterConfig func() (*rest.Config, error)
+ provider Provider
+}
+
+func (s *ProviderSingleTestSuite) SetupTest() {
+ // Single cluster provider is used when in-cluster or when the multi-cluster feature is disabled.
+ // For this test suite we simulate an in-cluster deployment.
+	s.originalInClusterConfig = InClusterConfig
+ s.mockServer = test.NewMockServer()
+ InClusterConfig = func() (*rest.Config, error) {
+ return s.mockServer.Config(), nil
+ }
+ provider, err := NewProvider(&config.StaticConfig{})
+ s.Require().NoError(err, "Expected no error creating provider with kubeconfig")
+ s.provider = provider
+}
+
+func (s *ProviderSingleTestSuite) TearDownTest() {
+	InClusterConfig = s.originalInClusterConfig
+ if s.mockServer != nil {
+ s.mockServer.Close()
+ }
+}
+
+func (s *ProviderSingleTestSuite) TestType() {
+ s.IsType(&singleClusterProvider{}, s.provider)
+}
+
+func (s *ProviderSingleTestSuite) TestWithNonOpenShiftCluster() {
+ s.Run("IsOpenShift returns false", func() {
+ inOpenShift := s.provider.IsOpenShift(s.T().Context())
+		s.False(inOpenShift, "Expected IsOpenShift to return false")
+ })
+}
+
+func (s *ProviderSingleTestSuite) TestWithOpenShiftCluster() {
+ s.mockServer.Handle(&test.InOpenShiftHandler{})
+
+ s.Run("IsOpenShift returns true", func() {
+ inOpenShift := s.provider.IsOpenShift(s.T().Context())
+		s.True(inOpenShift, "Expected IsOpenShift to return true")
+ })
+}
+
+func (s *ProviderSingleTestSuite) TestVerifyToken() {
+ s.mockServer.Handle(&test.TokenReviewHandler{})
+
+ s.Run("VerifyToken returns UserInfo for empty target (default target)", func() {
+ userInfo, audiences, err := s.provider.VerifyToken(s.T().Context(), "", "the-token", "the-audience")
+ s.Require().NoError(err, "Expected no error from VerifyToken with empty target")
+ s.Require().NotNil(userInfo, "Expected UserInfo from VerifyToken with empty target")
+		s.Equalf("test-user", userInfo.Username, "Expected username test-user, got: %s", userInfo.Username)
+		s.Containsf(userInfo.Groups, "system:authenticated", "Expected group system:authenticated in %v", userInfo.Groups)
+		s.Require().NotNil(audiences, "Expected audiences from VerifyToken with empty target")
+		s.Len(audiences, 1, "Expected exactly one audience from VerifyToken with empty target")
+ s.Containsf(audiences, "the-audience", "Expected audience the-audience in %v", audiences)
+ })
+ s.Run("VerifyToken returns error for non-empty context", func() {
+ userInfo, audiences, err := s.provider.VerifyToken(s.T().Context(), "non-empty", "the-token", "the-audience")
+ s.Require().Error(err, "Expected error from VerifyToken with non-empty target")
+ s.ErrorContains(err, "unable to get manager for other context/cluster with in-cluster strategy", "Expected error about trying to get other cluster")
+ s.Nil(userInfo, "Expected no UserInfo from VerifyToken with non-empty target")
+ s.Nil(audiences, "Expected no audiences from VerifyToken with non-empty target")
+ })
+}
+
+func (s *ProviderSingleTestSuite) TestGetTargets() {
+ s.Run("GetTargets returns single empty target", func() {
+ targets, err := s.provider.GetTargets(s.T().Context())
+ s.Require().NoError(err, "Expected no error from GetTargets")
+		s.Len(targets, 1, "Expected exactly one target from GetTargets")
+ s.Contains(targets, "", "Expected empty target from GetTargets")
+ })
+}
+
+func (s *ProviderSingleTestSuite) TestGetDerivedKubernetes() {
+ s.Run("GetDerivedKubernetes returns Kubernetes for empty target", func() {
+ k8s, err := s.provider.GetDerivedKubernetes(s.T().Context(), "")
+ s.Require().NoError(err, "Expected no error from GetDerivedKubernetes with empty target")
+ s.NotNil(k8s, "Expected Kubernetes from GetDerivedKubernetes with empty target")
+ })
+ s.Run("GetDerivedKubernetes returns error for non-empty target", func() {
+ k8s, err := s.provider.GetDerivedKubernetes(s.T().Context(), "non-empty-target")
+ s.Require().Error(err, "Expected error from GetDerivedKubernetes with non-empty target")
+ s.ErrorContains(err, "unable to get manager for other context/cluster with in-cluster strategy", "Expected error about trying to get other cluster")
+ s.Nil(k8s, "Expected no Kubernetes from GetDerivedKubernetes with non-empty target")
+ })
+}
+
+func (s *ProviderSingleTestSuite) TestGetDefaultTarget() {
+ s.Run("GetDefaultTarget returns empty string", func() {
+ s.Empty(s.provider.GetDefaultTarget(), "Expected fake-context as default target")
+ })
+}
+
+func (s *ProviderSingleTestSuite) TestGetTargetParameterName() {
+ s.Empty(s.provider.GetTargetParameterName(), "Expected empty string as target parameter name")
+}
+
+func TestProviderSingle(t *testing.T) {
+ suite.Run(t, new(ProviderSingleTestSuite))
+}
diff --git a/pkg/kubernetes/provider_test.go b/pkg/kubernetes/provider_test.go
index eca718f8..32ea5668 100644
--- a/pkg/kubernetes/provider_test.go
+++ b/pkg/kubernetes/provider_test.go
@@ -1,6 +1,7 @@
package kubernetes
import (
+ "os"
"strings"
"testing"
@@ -31,91 +32,133 @@ func (s *BaseProviderSuite) TearDownTest() {
type ProviderTestSuite struct {
BaseProviderSuite
+ originalEnv []string
+ originalInClusterConfig func() (*rest.Config, error)
+ mockServer *test.MockServer
+ kubeconfigPath string
}
-func (s *ProviderTestSuite) TestNewManagerProviderInCluster() {
- originalIsInClusterConfig := InClusterConfig
- s.T().Cleanup(func() {
- InClusterConfig = originalIsInClusterConfig
- })
+func (s *ProviderTestSuite) SetupTest() {
+ s.BaseProviderSuite.SetupTest()
+ s.originalEnv = os.Environ()
+ s.originalInClusterConfig = InClusterConfig
+ s.mockServer = test.NewMockServer()
+ s.kubeconfigPath = strings.ReplaceAll(s.mockServer.KubeconfigFile(s.T()), `\`, `\\`)
+}
+
+func (s *ProviderTestSuite) TearDownTest() {
+ s.BaseProviderSuite.TearDownTest()
+ test.RestoreEnv(s.originalEnv)
+ InClusterConfig = s.originalInClusterConfig
+ if s.mockServer != nil {
+ s.mockServer.Close()
+ }
+}
+
+func (s *ProviderTestSuite) TestNewProviderInCluster() {
InClusterConfig = func() (*rest.Config, error) {
return &rest.Config{}, nil
}
s.Run("With no cluster_provider_strategy, returns single-cluster provider", func() {
cfg := test.Must(config.ReadToml([]byte{}))
- provider, err := NewManagerProvider(cfg)
+ provider, err := NewProvider(cfg)
s.Require().NoError(err, "Expected no error for in-cluster provider")
s.NotNil(provider, "Expected provider instance")
s.IsType(&singleClusterProvider{}, provider, "Expected singleClusterProvider type")
})
- s.Run("With configured in-cluster cluster_provider_strategy, returns single-cluster provider", func() {
+ s.Run("With cluster_provider_strategy=in-cluster, returns single-cluster provider", func() {
cfg := test.Must(config.ReadToml([]byte(`
cluster_provider_strategy = "in-cluster"
`)))
- provider, err := NewManagerProvider(cfg)
+ provider, err := NewProvider(cfg)
s.Require().NoError(err, "Expected no error for single-cluster strategy")
s.NotNil(provider, "Expected provider instance")
s.IsType(&singleClusterProvider{}, provider, "Expected singleClusterProvider type")
})
- s.Run("With configured kubeconfig cluster_provider_strategy, returns error", func() {
+ s.Run("With cluster_provider_strategy=kubeconfig, returns error", func() {
cfg := test.Must(config.ReadToml([]byte(`
cluster_provider_strategy = "kubeconfig"
`)))
- provider, err := NewManagerProvider(cfg)
+ provider, err := NewProvider(cfg)
s.Require().Error(err, "Expected error for kubeconfig strategy")
s.ErrorContains(err, "kubeconfig ClusterProviderStrategy is invalid for in-cluster deployments")
s.Nilf(provider, "Expected no provider instance, got %v", provider)
})
- s.Run("With configured non-existent cluster_provider_strategy, returns error", func() {
+ s.Run("With cluster_provider_strategy=kubeconfig and kubeconfig set to valid path, returns kubeconfig provider", func() {
+ cfg := test.Must(config.ReadToml([]byte(`
+ cluster_provider_strategy = "kubeconfig"
+ kubeconfig = "` + s.kubeconfigPath + `"
+ `)))
+ provider, err := NewProvider(cfg)
+ s.Require().NoError(err, "Expected no error for kubeconfig strategy")
+ s.NotNil(provider, "Expected provider instance")
+ s.IsType(&kubeConfigClusterProvider{}, provider, "Expected kubeConfigClusterProvider type")
+ })
+ s.Run("With cluster_provider_strategy=non-existent, returns error", func() {
cfg := test.Must(config.ReadToml([]byte(`
cluster_provider_strategy = "i-do-not-exist"
`)))
- provider, err := NewManagerProvider(cfg)
+ provider, err := NewProvider(cfg)
s.Require().Error(err, "Expected error for non-existent strategy")
s.ErrorContains(err, "no provider registered for strategy 'i-do-not-exist'")
s.Nilf(provider, "Expected no provider instance, got %v", provider)
})
}
-func (s *ProviderTestSuite) TestNewManagerProviderLocal() {
- mockServer := test.NewMockServer()
- s.T().Cleanup(mockServer.Close)
- kubeconfigPath := strings.ReplaceAll(mockServer.KubeconfigFile(s.T()), `\`, `\\`)
+func (s *ProviderTestSuite) TestNewProviderLocal() {
+ InClusterConfig = func() (*rest.Config, error) {
+ return nil, rest.ErrNotInCluster
+ }
+ s.Require().NoError(os.Setenv("KUBECONFIG", s.kubeconfigPath))
s.Run("With no cluster_provider_strategy, returns kubeconfig provider", func() {
- cfg := test.Must(config.ReadToml([]byte(`
- kubeconfig = "` + kubeconfigPath + `"
- `)))
- provider, err := NewManagerProvider(cfg)
+ cfg := test.Must(config.ReadToml([]byte{}))
+ provider, err := NewProvider(cfg)
s.Require().NoError(err, "Expected no error for kubeconfig provider")
s.NotNil(provider, "Expected provider instance")
s.IsType(&kubeConfigClusterProvider{}, provider, "Expected kubeConfigClusterProvider type")
})
- s.Run("With configured kubeconfig cluster_provider_strategy, returns kubeconfig provider", func() {
+ s.Run("With cluster_provider_strategy=kubeconfig, returns kubeconfig provider", func() {
cfg := test.Must(config.ReadToml([]byte(`
- kubeconfig = "` + kubeconfigPath + `"
cluster_provider_strategy = "kubeconfig"
`)))
- provider, err := NewManagerProvider(cfg)
+ provider, err := NewProvider(cfg)
s.Require().NoError(err, "Expected no error for kubeconfig provider")
s.NotNil(provider, "Expected provider instance")
s.IsType(&kubeConfigClusterProvider{}, provider, "Expected kubeConfigClusterProvider type")
})
- s.Run("With configured in-cluster cluster_provider_strategy, returns error", func() {
+ s.Run("With cluster_provider_strategy=disabled, returns single-cluster provider", func() {
+ cfg := test.Must(config.ReadToml([]byte(`
+ cluster_provider_strategy = "disabled"
+ `)))
+ provider, err := NewProvider(cfg)
+ s.Require().NoError(err, "Expected no error for disabled strategy")
+ s.NotNil(provider, "Expected provider instance")
+ s.IsType(&singleClusterProvider{}, provider, "Expected singleClusterProvider type")
+ })
+ s.Run("With cluster_provider_strategy=in-cluster, returns error", func() {
cfg := test.Must(config.ReadToml([]byte(`
- kubeconfig = "` + kubeconfigPath + `"
cluster_provider_strategy = "in-cluster"
`)))
- provider, err := NewManagerProvider(cfg)
+ provider, err := NewProvider(cfg)
s.Require().Error(err, "Expected error for in-cluster strategy")
s.ErrorContains(err, "server must be deployed in cluster for the in-cluster ClusterProviderStrategy")
s.Nilf(provider, "Expected no provider instance, got %v", provider)
})
- s.Run("With configured non-existent cluster_provider_strategy, returns error", func() {
+ s.Run("With cluster_provider_strategy=in-cluster and kubeconfig set to valid path, returns error", func() {
+ cfg := test.Must(config.ReadToml([]byte(`
+ kubeconfig = "` + s.kubeconfigPath + `"
+ cluster_provider_strategy = "in-cluster"
+ `)))
+ provider, err := NewProvider(cfg)
+ s.Require().Error(err, "Expected error for in-cluster strategy")
+ s.Regexp("kubeconfig file .+ cannot be used with the in-cluster ClusterProviderStrategy", err.Error())
+ s.Nilf(provider, "Expected no provider instance, got %v", provider)
+ })
+ s.Run("With cluster_provider_strategy=non-existent, returns error", func() {
cfg := test.Must(config.ReadToml([]byte(`
- kubeconfig = "` + kubeconfigPath + `"
cluster_provider_strategy = "i-do-not-exist"
`)))
- provider, err := NewManagerProvider(cfg)
+ provider, err := NewProvider(cfg)
s.Require().Error(err, "Expected error for non-existent strategy")
s.ErrorContains(err, "no provider registered for strategy 'i-do-not-exist'")
s.Nilf(provider, "Expected no provider instance, got %v", provider)
diff --git a/pkg/kubernetes/token.go b/pkg/kubernetes/token.go
index d81f4135..f81c3a88 100644
--- a/pkg/kubernetes/token.go
+++ b/pkg/kubernetes/token.go
@@ -2,39 +2,10 @@ package kubernetes
import (
"context"
- "fmt"
authenticationv1api "k8s.io/api/authentication/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
-func (m *Manager) VerifyToken(ctx context.Context, token, audience string) (*authenticationv1api.UserInfo, []string, error) {
- tokenReviewClient, err := m.accessControlClientSet.TokenReview()
- if err != nil {
- return nil, nil, err
- }
- tokenReview := &authenticationv1api.TokenReview{
- TypeMeta: metav1.TypeMeta{
- APIVersion: "authentication.k8s.io/v1",
- Kind: "TokenReview",
- },
- Spec: authenticationv1api.TokenReviewSpec{
- Token: token,
- Audiences: []string{audience},
- },
- }
-
- result, err := tokenReviewClient.Create(ctx, tokenReview, metav1.CreateOptions{})
- if err != nil {
- return nil, nil, fmt.Errorf("failed to create token review: %v", err)
- }
-
- if !result.Status.Authenticated {
- if result.Status.Error != "" {
- return nil, nil, fmt.Errorf("token authentication failed: %s", result.Status.Error)
- }
- return nil, nil, fmt.Errorf("token authentication failed")
- }
-
- return &result.Status.User, result.Status.Audiences, nil
+type TokenVerifier interface {
+ VerifyToken(ctx context.Context, cluster, token, audience string) (*authenticationv1api.UserInfo, []string, error)
}
diff --git a/pkg/mcp/common_test.go b/pkg/mcp/common_test.go
index e9c49758..b91df691 100644
--- a/pkg/mcp/common_test.go
+++ b/pkg/mcp/common_test.go
@@ -1,23 +1,16 @@
package mcp
import (
- "bytes"
"context"
"encoding/json"
- "flag"
"fmt"
- "net/http/httptest"
"os"
"path/filepath"
"runtime"
- "strconv"
"testing"
"time"
- "github.com/mark3labs/mcp-go/client"
"github.com/mark3labs/mcp-go/client/transport"
- "github.com/mark3labs/mcp-go/mcp"
- "github.com/mark3labs/mcp-go/server"
"github.com/pkg/errors"
"github.com/spf13/afero"
"github.com/stretchr/testify/suite"
@@ -30,11 +23,7 @@ import (
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
- "k8s.io/client-go/tools/clientcmd"
- clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
toolswatch "k8s.io/client-go/tools/watch"
- "k8s.io/klog/v2"
- "k8s.io/klog/v2/textlogger"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/tools/setup-envtest/env"
@@ -45,7 +34,6 @@ import (
"github.com/containers/kubernetes-mcp-server/internal/test"
"github.com/containers/kubernetes-mcp-server/pkg/config"
- "github.com/containers/kubernetes-mcp-server/pkg/output"
)
// envTest has an expensive setup, so we only want to do it once per entire test run.
@@ -103,251 +91,6 @@ func TestMain(m *testing.M) {
os.Exit(code)
}
-type mcpContext struct {
- toolsets []string
- listOutput output.Output
- logLevel int
-
- staticConfig *config.StaticConfig
- clientOptions []transport.ClientOption
- before func(*mcpContext)
- after func(*mcpContext)
- ctx context.Context
- tempDir string
- cancel context.CancelFunc
- mcpServer *Server
- mcpHttpServer *httptest.Server
- mcpClient *client.Client
- klogState klog.State
- logBuffer bytes.Buffer
-}
-
-func (c *mcpContext) beforeEach(t *testing.T) {
- var err error
- c.ctx, c.cancel = context.WithCancel(t.Context())
- c.tempDir = t.TempDir()
- c.withKubeConfig(nil)
- if c.staticConfig == nil {
- c.staticConfig = config.Default()
- // Default to use YAML output for lists (previously the default)
- c.staticConfig.ListOutput = "yaml"
- }
- if c.toolsets != nil {
- c.staticConfig.Toolsets = c.toolsets
-
- }
- if c.listOutput != nil {
- c.staticConfig.ListOutput = c.listOutput.GetName()
- }
- if c.before != nil {
- c.before(c)
- }
- // Set up logging
- c.klogState = klog.CaptureState()
- flags := flag.NewFlagSet("test", flag.ContinueOnError)
- klog.InitFlags(flags)
- _ = flags.Set("v", strconv.Itoa(c.logLevel))
- klog.SetLogger(textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(c.logLevel), textlogger.Output(&c.logBuffer))))
- // MCP Server
- if c.mcpServer, err = NewServer(Configuration{StaticConfig: c.staticConfig}); err != nil {
- t.Fatal(err)
- return
- }
- c.mcpHttpServer = server.NewTestServer(c.mcpServer.server, server.WithSSEContextFunc(contextFunc))
- if c.mcpClient, err = client.NewSSEMCPClient(c.mcpHttpServer.URL+"/sse", c.clientOptions...); err != nil {
- t.Fatal(err)
- return
- }
- // MCP Client
- if err = c.mcpClient.Start(c.ctx); err != nil {
- t.Fatal(err)
- return
- }
- initRequest := mcp.InitializeRequest{}
- initRequest.Params.ProtocolVersion = mcp.LATEST_PROTOCOL_VERSION
- initRequest.Params.ClientInfo = mcp.Implementation{Name: "test", Version: "1.33.7"}
- _, err = c.mcpClient.Initialize(c.ctx, initRequest)
- if err != nil {
- t.Fatal(err)
- return
- }
-}
-
-func (c *mcpContext) afterEach() {
- if c.after != nil {
- c.after(c)
- }
- c.cancel()
- c.mcpServer.Close()
- _ = c.mcpClient.Close()
- c.mcpHttpServer.Close()
- c.klogState.Restore()
-}
-
-func testCase(t *testing.T, test func(c *mcpContext)) {
- testCaseWithContext(t, &mcpContext{}, test)
-}
-
-func testCaseWithContext(t *testing.T, mcpCtx *mcpContext, test func(c *mcpContext)) {
- mcpCtx.beforeEach(t)
- defer mcpCtx.afterEach()
- test(mcpCtx)
-}
-
-// withKubeConfig sets up a fake kubeconfig in the temp directory based on the provided rest.Config
-func (c *mcpContext) withKubeConfig(rc *rest.Config) *clientcmdapi.Config {
- fakeConfig := clientcmdapi.NewConfig()
- fakeConfig.Clusters["fake"] = clientcmdapi.NewCluster()
- fakeConfig.Clusters["fake"].Server = "https://127.0.0.1:6443"
- fakeConfig.Clusters["additional-cluster"] = clientcmdapi.NewCluster()
- fakeConfig.AuthInfos["fake"] = clientcmdapi.NewAuthInfo()
- fakeConfig.AuthInfos["additional-auth"] = clientcmdapi.NewAuthInfo()
- if rc != nil {
- fakeConfig.Clusters["fake"].Server = rc.Host
- fakeConfig.Clusters["fake"].CertificateAuthorityData = rc.CAData
- fakeConfig.AuthInfos["fake"].ClientKeyData = rc.KeyData
- fakeConfig.AuthInfos["fake"].ClientCertificateData = rc.CertData
- }
- fakeConfig.Contexts["fake-context"] = clientcmdapi.NewContext()
- fakeConfig.Contexts["fake-context"].Cluster = "fake"
- fakeConfig.Contexts["fake-context"].AuthInfo = "fake"
- fakeConfig.Contexts["additional-context"] = clientcmdapi.NewContext()
- fakeConfig.Contexts["additional-context"].Cluster = "additional-cluster"
- fakeConfig.Contexts["additional-context"].AuthInfo = "additional-auth"
- fakeConfig.CurrentContext = "fake-context"
- kubeConfig := filepath.Join(c.tempDir, "config")
- _ = clientcmd.WriteToFile(*fakeConfig, kubeConfig)
- _ = os.Setenv("KUBECONFIG", kubeConfig)
- if c.mcpServer != nil {
- if err := c.mcpServer.reloadKubernetesClusterProvider(); err != nil {
- panic(err)
- }
- }
- return fakeConfig
-}
-
-// withEnvTest sets up the environment for kubeconfig to be used with envTest
-func (c *mcpContext) withEnvTest() {
- c.withKubeConfig(envTestRestConfig)
-}
-
-// inOpenShift sets up the kubernetes environment to seem to be running OpenShift
-func inOpenShift(c *mcpContext) {
- c.withEnvTest()
- crdTemplate := `
- {
- "apiVersion": "apiextensions.k8s.io/v1",
- "kind": "CustomResourceDefinition",
- "metadata": {"name": "%s"},
- "spec": {
- "group": "%s",
- "versions": [{
- "name": "v1","served": true,"storage": true,
- "schema": {"openAPIV3Schema": {"type": "object","x-kubernetes-preserve-unknown-fields": true}}
- }],
- "scope": "%s",
- "names": {"plural": "%s","singular": "%s","kind": "%s"}
- }
- }`
- tasks, _ := errgroup.WithContext(c.ctx)
- tasks.Go(func() error {
- return c.crdApply(fmt.Sprintf(crdTemplate, "projects.project.openshift.io", "project.openshift.io",
- "Cluster", "projects", "project", "Project"))
- })
- tasks.Go(func() error {
- return c.crdApply(fmt.Sprintf(crdTemplate, "routes.route.openshift.io", "route.openshift.io",
- "Namespaced", "routes", "route", "Route"))
- })
- if err := tasks.Wait(); err != nil {
- panic(err)
- }
-}
-
-// inOpenShiftClear clears the kubernetes environment so it no longer seems to be running OpenShift
-func inOpenShiftClear(c *mcpContext) {
- tasks, _ := errgroup.WithContext(c.ctx)
- tasks.Go(func() error { return c.crdDelete("projects.project.openshift.io") })
- tasks.Go(func() error { return c.crdDelete("routes.route.openshift.io") })
- if err := tasks.Wait(); err != nil {
- panic(err)
- }
-}
-
-// newKubernetesClient creates a new Kubernetes client with the envTest kubeconfig
-func (c *mcpContext) newKubernetesClient() *kubernetes.Clientset {
- return kubernetes.NewForConfigOrDie(envTestRestConfig)
-}
-
-// newApiExtensionsClient creates a new ApiExtensions client with the envTest kubeconfig
-func (c *mcpContext) newApiExtensionsClient() *apiextensionsv1.ApiextensionsV1Client {
- return apiextensionsv1.NewForConfigOrDie(envTestRestConfig)
-}
-
-// crdApply creates a CRD from the provided resource string and waits for it to be established
-func (c *mcpContext) crdApply(resource string) error {
- apiExtensionsV1Client := c.newApiExtensionsClient()
- var crd = &apiextensionsv1spec.CustomResourceDefinition{}
- err := json.Unmarshal([]byte(resource), crd)
- if err != nil {
- return fmt.Errorf("failed to create CRD %v", err)
- }
- _, err = apiExtensionsV1Client.CustomResourceDefinitions().Create(c.ctx, crd, metav1.CreateOptions{})
- if err != nil {
- return fmt.Errorf("failed to create CRD %v", err)
- }
- c.crdWaitUntilReady(crd.Name)
- return nil
-}
-
-// crdDelete deletes a CRD by name and waits for it to be removed
-func (c *mcpContext) crdDelete(name string) error {
- apiExtensionsV1Client := c.newApiExtensionsClient()
- err := apiExtensionsV1Client.CustomResourceDefinitions().Delete(c.ctx, name, metav1.DeleteOptions{
- GracePeriodSeconds: ptr.To(int64(0)),
- })
- iteration := 0
- for iteration < 100 {
- if _, derr := apiExtensionsV1Client.CustomResourceDefinitions().Get(c.ctx, name, metav1.GetOptions{}); derr != nil {
- break
- }
- time.Sleep(5 * time.Millisecond)
- iteration++
- }
- if err != nil {
- return errors.Wrap(err, "failed to delete CRD")
- }
- return nil
-}
-
-// crdWaitUntilReady waits for a CRD to be established
-func (c *mcpContext) crdWaitUntilReady(name string) {
- watcher, err := c.newApiExtensionsClient().CustomResourceDefinitions().Watch(c.ctx, metav1.ListOptions{
- FieldSelector: "metadata.name=" + name,
- })
- if err != nil {
- panic(fmt.Errorf("failed to watch CRD %v", err))
- }
- _, err = toolswatch.UntilWithoutRetry(c.ctx, watcher, func(event watch.Event) (bool, error) {
- for _, c := range event.Object.(*apiextensionsv1spec.CustomResourceDefinition).Status.Conditions {
- if c.Type == apiextensionsv1spec.Established && c.Status == apiextensionsv1spec.ConditionTrue {
- return true, nil
- }
- }
- return false, nil
- })
- if err != nil {
- panic(fmt.Errorf("failed to wait for CRD %v", err))
- }
-}
-
-// callTool helper function to call a tool by name with arguments
-func (c *mcpContext) callTool(name string, args map[string]interface{}) (*mcp.CallToolResult, error) {
- callToolRequest := mcp.CallToolRequest{}
- callToolRequest.Params.Name = name
- callToolRequest.Params.Arguments = args
- return c.mcpClient.CallTool(c.ctx, callToolRequest)
-}
-
func restoreAuth(ctx context.Context) {
kubernetesAdmin := kubernetes.NewForConfigOrDie(envTest.Config)
// Authorization
@@ -443,9 +186,104 @@ func (s *BaseMcpSuite) TearDownTest() {
}
}
-func (s *BaseMcpSuite) InitMcpClient() {
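+// InitMcpClient creates the MCP server from the suite configuration and connects a test client over streamable HTTP, forwarding any extra transport options.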
+func (s *BaseMcpSuite) InitMcpClient(options ...transport.StreamableHTTPCOption) {
var err error
s.mcpServer, err = NewServer(Configuration{StaticConfig: s.Cfg})
s.Require().NoError(err, "Expected no error creating MCP server")
- s.McpClient = test.NewMcpClient(s.T(), s.mcpServer.ServeHTTP(nil))
+ s.McpClient = test.NewMcpClient(s.T(), s.mcpServer.ServeHTTP(nil), options...)
+}
+
+// EnvTestInOpenShift sets up the Kubernetes environment so it appears to be running OpenShift
+func EnvTestInOpenShift(ctx context.Context) error {
+ crdTemplate := `
+ {
+ "apiVersion": "apiextensions.k8s.io/v1",
+ "kind": "CustomResourceDefinition",
+ "metadata": {"name": "%s"},
+ "spec": {
+ "group": "%s",
+ "versions": [{
+ "name": "v1","served": true,"storage": true,
+ "schema": {"openAPIV3Schema": {"type": "object","x-kubernetes-preserve-unknown-fields": true}}
+ }],
+ "scope": "%s",
+ "names": {"plural": "%s","singular": "%s","kind": "%s"}
+ }
+ }`
+ tasks, _ := errgroup.WithContext(ctx)
+ tasks.Go(func() error {
+ return EnvTestCrdApply(ctx, fmt.Sprintf(crdTemplate, "projects.project.openshift.io", "project.openshift.io",
+ "Cluster", "projects", "project", "Project"))
+ })
+ tasks.Go(func() error {
+ return EnvTestCrdApply(ctx, fmt.Sprintf(crdTemplate, "routes.route.openshift.io", "route.openshift.io",
+ "Namespaced", "routes", "route", "Route"))
+ })
+ return tasks.Wait()
+}
+
+// EnvTestInOpenShiftClear clears the Kubernetes environment so it no longer appears to be running OpenShift
+func EnvTestInOpenShiftClear(ctx context.Context) error {
+ tasks, _ := errgroup.WithContext(ctx)
+ tasks.Go(func() error { return EnvTestCrdDelete(ctx, "projects.project.openshift.io") })
+ tasks.Go(func() error { return EnvTestCrdDelete(ctx, "routes.route.openshift.io") })
+ return tasks.Wait()
+}
+
+// EnvTestCrdWaitUntilReady waits for a CRD to be established
+func EnvTestCrdWaitUntilReady(ctx context.Context, name string) error {
+ apiExtensionClient := apiextensionsv1.NewForConfigOrDie(envTestRestConfig)
+ watcher, err := apiExtensionClient.CustomResourceDefinitions().Watch(ctx, metav1.ListOptions{
+ FieldSelector: "metadata.name=" + name,
+ })
+ if err != nil {
+ return fmt.Errorf("unable to watch CRDs: %w", err)
+ }
+ _, err = toolswatch.UntilWithoutRetry(ctx, watcher, func(event watch.Event) (bool, error) {
+ for _, c := range event.Object.(*apiextensionsv1spec.CustomResourceDefinition).Status.Conditions {
+ if c.Type == apiextensionsv1spec.Established && c.Status == apiextensionsv1spec.ConditionTrue {
+ return true, nil
+ }
+ }
+ return false, nil
+ })
+ if err != nil {
+ return fmt.Errorf("failed to wait for CRD: %w", err)
+ }
+ return nil
+}
+
+// EnvTestCrdApply creates a CRD from the provided resource string and waits for it to be established
+func EnvTestCrdApply(ctx context.Context, resource string) error {
+ apiExtensionsV1Client := apiextensionsv1.NewForConfigOrDie(envTestRestConfig)
+ var crd = &apiextensionsv1spec.CustomResourceDefinition{}
+ err := json.Unmarshal([]byte(resource), crd)
+ if err != nil {
+		return fmt.Errorf("failed to unmarshal CRD: %w", err)
+ }
+ _, err = apiExtensionsV1Client.CustomResourceDefinitions().Create(ctx, crd, metav1.CreateOptions{})
+ if err != nil {
+		return fmt.Errorf("failed to create CRD: %w", err)
+ }
+ return EnvTestCrdWaitUntilReady(ctx, crd.Name)
+}
+
+// EnvTestCrdDelete deletes a CRD by name and waits for it to be removed
+func EnvTestCrdDelete(ctx context.Context, name string) error {
+ apiExtensionsV1Client := apiextensionsv1.NewForConfigOrDie(envTestRestConfig)
+ err := apiExtensionsV1Client.CustomResourceDefinitions().Delete(ctx, name, metav1.DeleteOptions{
+ GracePeriodSeconds: ptr.To(int64(0)),
+ })
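+	// Poll (up to 100 iterations of 5ms) until the CRD is no longer returned, so subsequent tests start from a clean state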
+ iteration := 0
+ for iteration < 100 {
+ if _, derr := apiExtensionsV1Client.CustomResourceDefinitions().Get(ctx, name, metav1.GetOptions{}); derr != nil {
+ break
+ }
+ time.Sleep(5 * time.Millisecond)
+ iteration++
+ }
+ if err != nil {
+ return errors.Wrap(err, "failed to delete CRD")
+ }
+ return nil
}
diff --git a/pkg/mcp/events_test.go b/pkg/mcp/events_test.go
index 6d771bca..68ca85a8 100644
--- a/pkg/mcp/events_test.go
+++ b/pkg/mcp/events_test.go
@@ -126,6 +126,7 @@ func (s *EventsSuite) TestEventsListDenied() {
s.InitMcpClient()
s.Run("events_list (denied)", func() {
toolResult, err := s.CallTool("events_list", map[string]interface{}{})
+ s.Require().NotNil(toolResult, "toolResult should not be nil")
s.Run("has error", func() {
s.Truef(toolResult.IsError, "call tool should fail")
s.Nilf(err, "call tool should not return error object")
diff --git a/pkg/mcp/m3labs.go b/pkg/mcp/m3labs.go
index bae6aeb7..ade0f56b 100644
--- a/pkg/mcp/m3labs.go
+++ b/pkg/mcp/m3labs.go
@@ -39,15 +39,9 @@ func ServerToolToM3LabsServerTool(s *Server, tools []api.ServerTool) ([]server.S
m3labTool.RawInputSchema = schema
}
m3labHandler := func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
- // get the correct internalk8s.Manager for the target specified in the request
+ // get the correct derived Kubernetes client for the target specified in the request
cluster := request.GetString(s.p.GetTargetParameterName(), s.p.GetDefaultTarget())
- m, err := s.p.GetManagerFor(ctx, cluster)
- if err != nil {
- return nil, err
- }
-
- // derive the manager based on auth on top of the settings for the cluster
- k, err := m.Derived(ctx)
+ k, err := s.p.GetDerivedKubernetes(ctx, cluster)
if err != nil {
return nil, err
}
diff --git a/pkg/mcp/mcp.go b/pkg/mcp/mcp.go
index d8e91775..5f7511cc 100644
--- a/pkg/mcp/mcp.go
+++ b/pkg/mcp/mcp.go
@@ -67,14 +67,12 @@ type Server struct {
configuration *Configuration
server *server.MCPServer
enabledTools []string
- p internalk8s.ManagerProvider
+ p internalk8s.Provider
}
func NewServer(configuration Configuration) (*Server, error) {
var serverOptions []server.ServerOption
serverOptions = append(serverOptions,
- server.WithResourceCapabilities(true, true),
- server.WithPromptCapabilities(true),
server.WithToolCapabilities(true),
server.WithLogging(),
server.WithToolHandlerMiddleware(toolCallLoggingMiddleware),
@@ -101,7 +99,7 @@ func NewServer(configuration Configuration) (*Server, error) {
func (s *Server) reloadKubernetesClusterProvider() error {
ctx := context.Background()
- p, err := internalk8s.NewManagerProvider(s.configuration.StaticConfig)
+ p, err := internalk8s.NewProvider(s.configuration.StaticConfig)
if err != nil {
return err
}
@@ -113,11 +111,6 @@ func (s *Server) reloadKubernetesClusterProvider() error {
s.p = p
- k, err := s.p.GetManagerFor(ctx, s.p.GetDefaultTarget())
- if err != nil {
- return err
- }
-
targets, err := p.GetTargets(ctx)
if err != nil {
return err
@@ -136,7 +129,7 @@ func (s *Server) reloadKubernetesClusterProvider() error {
applicableTools := make([]api.ServerTool, 0)
for _, toolset := range s.configuration.Toolsets() {
- for _, tool := range toolset.GetTools(k) {
+ for _, tool := range toolset.GetTools(p) {
tool := mutator(tool)
if !filter(tool) {
continue
@@ -182,23 +175,11 @@ func (s *Server) ServeHTTP(httpServer *http.Server) *server.StreamableHTTPServer
// KubernetesApiVerifyToken verifies the given token with the audience by
// sending an TokenReview request to API Server for the specified cluster.
-func (s *Server) KubernetesApiVerifyToken(ctx context.Context, token string, audience string, cluster string) (*authenticationapiv1.UserInfo, []string, error) {
+func (s *Server) KubernetesApiVerifyToken(ctx context.Context, cluster, token, audience string) (*authenticationapiv1.UserInfo, []string, error) {
if s.p == nil {
return nil, nil, fmt.Errorf("kubernetes cluster provider is not initialized")
}
-
- // Use provided cluster or default
- if cluster == "" {
- cluster = s.p.GetDefaultTarget()
- }
-
- // Get the cluster manager for the specified cluster
- m, err := s.p.GetManagerFor(ctx, cluster)
- if err != nil {
- return nil, nil, err
- }
-
- return m.VerifyToken(ctx, token, audience)
+ return s.p.VerifyToken(ctx, cluster, token, audience)
}
// GetTargetParameterName returns the parameter name used for target identification in MCP requests
diff --git a/pkg/mcp/mcp_middleware_test.go b/pkg/mcp/mcp_middleware_test.go
new file mode 100644
index 00000000..ce88e7b4
--- /dev/null
+++ b/pkg/mcp/mcp_middleware_test.go
@@ -0,0 +1,87 @@
+package mcp
+
+import (
+ "bytes"
+ "flag"
+ "regexp"
+ "strconv"
+ "testing"
+
+ "github.com/mark3labs/mcp-go/client/transport"
+ "github.com/stretchr/testify/suite"
+ "k8s.io/klog/v2"
+ "k8s.io/klog/v2/textlogger"
+)
+
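+// McpLoggingSuite captures klog output in an in-memory buffer so tests can assert on the tool call logging middleware.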
+type McpLoggingSuite struct {
+ BaseMcpSuite
+ klogState klog.State
+ logBuffer bytes.Buffer
+}
+
+func (s *McpLoggingSuite) SetupTest() {
+ s.BaseMcpSuite.SetupTest()
+ s.klogState = klog.CaptureState()
+}
+
+func (s *McpLoggingSuite) TearDownTest() {
+ s.BaseMcpSuite.TearDownTest()
+ s.klogState.Restore()
+}
+
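+// SetLogLevel configures klog at the given verbosity and redirects its output to the suite's log buffer.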
+func (s *McpLoggingSuite) SetLogLevel(level int) {
+ flags := flag.NewFlagSet("test", flag.ContinueOnError)
+ klog.InitFlags(flags)
+ _ = flags.Set("v", strconv.Itoa(level))
+ klog.SetLogger(textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(level), textlogger.Output(&s.logBuffer))))
+}
+
+func (s *McpLoggingSuite) TestLogsToolCall() {
+ s.SetLogLevel(5)
+ s.InitMcpClient()
+ _, err := s.CallTool("configuration_view", map[string]interface{}{"minified": false})
+ s.Require().NoError(err, "call to tool configuration_view failed")
+
+ s.Run("Logs tool name", func() {
+ s.Contains(s.logBuffer.String(), "mcp tool call: configuration_view(")
+ })
+ s.Run("Logs tool call arguments", func() {
+ expected := `"mcp tool call: configuration_view\((.+)\)"`
+ m := regexp.MustCompile(expected).FindStringSubmatch(s.logBuffer.String())
+ s.Len(m, 2, "Expected log entry to contain arguments")
+ s.Equal("map[minified:false]", m[1], "Expected log arguments to be 'map[minified:false]'")
+ })
+}
+
+func (s *McpLoggingSuite) TestLogsToolCallHeaders() {
+ s.SetLogLevel(7)
+ s.InitMcpClient(transport.WithHTTPHeaders(map[string]string{
+ "Accept-Encoding": "gzip",
+ "Authorization": "Bearer should-not-be-logged",
+ "authorization": "Bearer should-not-be-logged",
+ "a-loggable-header": "should-be-logged",
+ }))
+ _, err := s.CallTool("configuration_view", map[string]interface{}{"minified": false})
+ s.Require().NoError(err, "call to tool configuration_view failed")
+
+ s.Run("Logs tool call headers", func() {
+ expectedLog := "mcp tool call headers: A-Loggable-Header: should-be-logged"
+ s.Contains(s.logBuffer.String(), expectedLog, "Expected log to contain loggable header")
+ })
+ sensitiveHeaders := []string{
+ "Authorization:",
+ // TODO: Add more sensitive headers as needed
+ }
+ s.Run("Does not log sensitive headers", func() {
+ for _, header := range sensitiveHeaders {
+ s.NotContains(s.logBuffer.String(), header, "Log should not contain sensitive header")
+ }
+ })
+ s.Run("Does not log sensitive header values", func() {
+ s.NotContains(s.logBuffer.String(), "should-not-be-logged", "Log should not contain sensitive header value")
+ })
+}
+
+func TestMcpLogging(t *testing.T) {
+ suite.Run(t, new(McpLoggingSuite))
+}
diff --git a/pkg/mcp/mcp_test.go b/pkg/mcp/mcp_test.go
index 7be9a423..25e1c651 100644
--- a/pkg/mcp/mcp_test.go
+++ b/pkg/mcp/mcp_test.go
@@ -1,63 +1,27 @@
package mcp
import (
- "context"
"net/http"
- "os"
- "path/filepath"
- "runtime"
"testing"
- "time"
"github.com/containers/kubernetes-mcp-server/internal/test"
- "github.com/mark3labs/mcp-go/client"
- "github.com/mark3labs/mcp-go/mcp"
+ "github.com/mark3labs/mcp-go/client/transport"
+ "github.com/stretchr/testify/suite"
)
-func TestWatchKubeConfig(t *testing.T) {
- if runtime.GOOS != "linux" && runtime.GOOS != "darwin" {
- t.Skip("Skipping test on non-Unix-like platforms")
- }
- testCase(t, func(c *mcpContext) {
- // Given
- withTimeout, cancel := context.WithTimeout(c.ctx, 5*time.Second)
- defer cancel()
- var notification *mcp.JSONRPCNotification
- c.mcpClient.OnNotification(func(n mcp.JSONRPCNotification) {
- notification = &n
- })
- // When
- f, _ := os.OpenFile(filepath.Join(c.tempDir, "config"), os.O_APPEND|os.O_WRONLY, 0644)
- _, _ = f.WriteString("\n")
- for notification == nil {
- select {
- case <-withTimeout.Done():
- default:
- time.Sleep(100 * time.Millisecond)
- }
- }
- // Then
- t.Run("WatchKubeConfig notifies tools change", func(t *testing.T) {
- if notification == nil {
- t.Fatalf("WatchKubeConfig did not notify")
- }
- if notification.Method != "notifications/tools/list_changed" {
- t.Fatalf("WatchKubeConfig did not notify tools change, got %s", notification.Method)
- }
- })
- })
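+// McpHeadersSuite records the headers of every request made to the mock Kube API, keyed by request path, so header propagation can be asserted.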
+type McpHeadersSuite struct {
+ BaseMcpSuite
+ mockServer *test.MockServer
+ pathHeaders map[string]http.Header
}
-func TestSseHeaders(t *testing.T) {
- mockServer := test.NewMockServer()
- defer mockServer.Close()
- before := func(c *mcpContext) {
- c.withKubeConfig(mockServer.Config())
- c.clientOptions = append(c.clientOptions, client.WithHeaders(map[string]string{"kubernetes-authorization": "Bearer a-token-from-mcp-client"}))
- }
- pathHeaders := make(map[string]http.Header, 0)
- mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
- pathHeaders[req.URL.Path] = req.Header.Clone()
+func (s *McpHeadersSuite) SetupTest() {
+ s.BaseMcpSuite.SetupTest()
+ s.mockServer = test.NewMockServer()
+ s.Cfg.KubeConfig = s.mockServer.KubeconfigFile(s.T())
+ s.pathHeaders = make(map[string]http.Header)
+ s.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ s.pathHeaders[req.URL.Path] = req.Header.Clone()
// Request Performed by DiscoveryClient to Kube API (Get API Groups legacy -core-)
if req.URL.Path == "/api" {
w.Header().Set("Content-Type", "application/json")
@@ -90,38 +54,42 @@ func TestSseHeaders(t *testing.T) {
}
w.WriteHeader(404)
}))
- testCaseWithContext(t, &mcpContext{before: before}, func(c *mcpContext) {
- _, _ = c.callTool("pods_list", map[string]interface{}{})
- t.Run("DiscoveryClient propagates headers to Kube API", func(t *testing.T) {
- if len(pathHeaders) == 0 {
- t.Fatalf("No requests were made to Kube API")
- }
- if pathHeaders["/api"] == nil || pathHeaders["/api"].Get("Authorization") != "Bearer a-token-from-mcp-client" {
- t.Fatalf("Overridden header Authorization not found in request to /api")
- }
- if pathHeaders["/apis"] == nil || pathHeaders["/apis"].Get("Authorization") != "Bearer a-token-from-mcp-client" {
- t.Fatalf("Overridden header Authorization not found in request to /apis")
- }
- if pathHeaders["/api/v1"] == nil || pathHeaders["/api/v1"].Get("Authorization") != "Bearer a-token-from-mcp-client" {
- t.Fatalf("Overridden header Authorization not found in request to /api/v1")
- }
+}
+
+func (s *McpHeadersSuite) TearDownTest() {
+ s.BaseMcpSuite.TearDownTest()
+ if s.mockServer != nil {
+ s.mockServer.Close()
+ }
+}
+
+func (s *McpHeadersSuite) TestAuthorizationHeaderPropagation() {
+ cases := []string{"kubernetes-authorization", "Authorization"}
+ for _, header := range cases {
+ s.InitMcpClient(transport.WithHTTPHeaders(map[string]string{header: "Bearer a-token-from-mcp-client"}))
+ _, _ = s.CallTool("pods_list", map[string]interface{}{})
+ s.Require().Greater(len(s.pathHeaders), 0, "No requests were made to Kube API")
+ s.Run("DiscoveryClient propagates "+header+" header to Kube API", func() {
+ s.Require().NotNil(s.pathHeaders["/api"], "No requests were made to /api")
+ s.Equal("Bearer a-token-from-mcp-client", s.pathHeaders["/api"].Get("Authorization"), "Overridden header Authorization not found in request to /api")
+ s.Require().NotNil(s.pathHeaders["/apis"], "No requests were made to /apis")
+ s.Equal("Bearer a-token-from-mcp-client", s.pathHeaders["/apis"].Get("Authorization"), "Overridden header Authorization not found in request to /apis")
+ s.Require().NotNil(s.pathHeaders["/api/v1"], "No requests were made to /api/v1")
+ s.Equal("Bearer a-token-from-mcp-client", s.pathHeaders["/api/v1"].Get("Authorization"), "Overridden header Authorization not found in request to /api/v1")
})
- t.Run("DynamicClient propagates headers to Kube API", func(t *testing.T) {
- if len(pathHeaders) == 0 {
- t.Fatalf("No requests were made to Kube API")
- }
- if pathHeaders["/api/v1/namespaces/default/pods"] == nil || pathHeaders["/api/v1/namespaces/default/pods"].Get("Authorization") != "Bearer a-token-from-mcp-client" {
- t.Fatalf("Overridden header Authorization not found in request to /api/v1/namespaces/default/pods")
- }
+ s.Run("DynamicClient propagates "+header+" header to Kube API", func() {
+ s.Require().NotNil(s.pathHeaders["/api/v1/namespaces/default/pods"], "No requests were made to /api/v1/namespaces/default/pods")
+ s.Equal("Bearer a-token-from-mcp-client", s.pathHeaders["/api/v1/namespaces/default/pods"].Get("Authorization"), "Overridden header Authorization not found in request to /api/v1/namespaces/default/pods")
})
- _, _ = c.callTool("pods_delete", map[string]interface{}{"name": "a-pod-to-delete"})
- t.Run("kubernetes.Interface propagates headers to Kube API", func(t *testing.T) {
- if len(pathHeaders) == 0 {
- t.Fatalf("No requests were made to Kube API")
- }
- if pathHeaders["/api/v1/namespaces/default/pods/a-pod-to-delete"] == nil || pathHeaders["/api/v1/namespaces/default/pods/a-pod-to-delete"].Get("Authorization") != "Bearer a-token-from-mcp-client" {
- t.Fatalf("Overridden header Authorization not found in request to /api/v1/namespaces/default/pods/a-pod-to-delete")
- }
+ _, _ = s.CallTool("pods_delete", map[string]interface{}{"name": "a-pod-to-delete"})
+ s.Run("kubernetes.Interface propagates "+header+" header to Kube API", func() {
+ s.Require().NotNil(s.pathHeaders["/api/v1/namespaces/default/pods/a-pod-to-delete"], "No requests were made to /api/v1/namespaces/default/pods/a-pod-to-delete")
+ s.Equal("Bearer a-token-from-mcp-client", s.pathHeaders["/api/v1/namespaces/default/pods/a-pod-to-delete"].Get("Authorization"), "Overridden header Authorization not found in request to /api/v1/namespaces/default/pods/a-pod-to-delete")
})
- })
+	}
+}
+
+func TestMcpHeaders(t *testing.T) {
+ suite.Run(t, new(McpHeadersSuite))
}
diff --git a/pkg/mcp/mcp_tools_test.go b/pkg/mcp/mcp_tools_test.go
index 196b93e2..f6b8a8be 100644
--- a/pkg/mcp/mcp_tools_test.go
+++ b/pkg/mcp/mcp_tools_test.go
@@ -1,180 +1,130 @@
package mcp
import (
- "regexp"
- "strings"
"testing"
- "github.com/mark3labs/mcp-go/client/transport"
+ "github.com/BurntSushi/toml"
"github.com/mark3labs/mcp-go/mcp"
+ "github.com/stretchr/testify/suite"
"k8s.io/utils/ptr"
-
- "github.com/containers/kubernetes-mcp-server/internal/test"
- "github.com/containers/kubernetes-mcp-server/pkg/config"
)
-func TestUnrestricted(t *testing.T) {
- testCase(t, func(c *mcpContext) {
- tools, err := c.mcpClient.ListTools(c.ctx, mcp.ListToolsRequest{})
- t.Run("ListTools returns tools", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call ListTools failed %v", err)
- }
- })
- t.Run("Destructive tools ARE NOT read only", func(t *testing.T) {
- for _, tool := range tools.Tools {
- readOnly := ptr.Deref(tool.Annotations.ReadOnlyHint, false)
- destructive := ptr.Deref(tool.Annotations.DestructiveHint, false)
- if readOnly && destructive {
- t.Errorf("Tool %s is read-only and destructive, which is not allowed", tool.Name)
- }
- }
- })
+// McpToolProcessingSuite tests MCP tool processing (isToolApplicable)
+type McpToolProcessingSuite struct {
+ BaseMcpSuite
+}
+
+func (s *McpToolProcessingSuite) TestUnrestricted() {
+ s.InitMcpClient()
+
+ tools, err := s.ListTools(s.T().Context(), mcp.ListToolsRequest{})
+ s.Require().NotNil(tools)
+
+ s.Run("ListTools returns tools", func() {
+ s.NoError(err, "call ListTools failed")
+ s.NotNilf(tools, "list tools failed")
+ })
+
+ s.Run("Destructive tools ARE NOT read only", func() {
+ for _, tool := range tools.Tools {
+ readOnly := ptr.Deref(tool.Annotations.ReadOnlyHint, false)
+ destructive := ptr.Deref(tool.Annotations.DestructiveHint, false)
+ s.Falsef(readOnly && destructive, "Tool %s is read-only and destructive, which is not allowed", tool.Name)
+ }
})
}
-func TestReadOnly(t *testing.T) {
- readOnlyServer := func(c *mcpContext) { c.staticConfig = &config.StaticConfig{ReadOnly: true} }
- testCaseWithContext(t, &mcpContext{before: readOnlyServer}, func(c *mcpContext) {
- tools, err := c.mcpClient.ListTools(c.ctx, mcp.ListToolsRequest{})
- t.Run("ListTools returns tools", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call ListTools failed %v", err)
- }
- })
- t.Run("ListTools returns only read-only tools", func(t *testing.T) {
- for _, tool := range tools.Tools {
- if tool.Annotations.ReadOnlyHint == nil || !*tool.Annotations.ReadOnlyHint {
- t.Errorf("Tool %s is not read-only but should be", tool.Name)
- }
- if tool.Annotations.DestructiveHint != nil && *tool.Annotations.DestructiveHint {
- t.Errorf("Tool %s is destructive but should not be in read-only mode", tool.Name)
- }
- }
- })
+func (s *McpToolProcessingSuite) TestReadOnly() {
+ s.Require().NoError(toml.Unmarshal([]byte(`
+ read_only = true
+ `), s.Cfg), "Expected to parse read only server config")
+ s.InitMcpClient()
+
+ tools, err := s.ListTools(s.T().Context(), mcp.ListToolsRequest{})
+ s.Require().NotNil(tools)
+
+ s.Run("ListTools returns tools", func() {
+ s.NoError(err, "call ListTools failed")
+ s.NotNilf(tools, "list tools failed")
+ })
+
+ s.Run("ListTools returns only read-only tools", func() {
+ for _, tool := range tools.Tools {
+ s.Falsef(tool.Annotations.ReadOnlyHint == nil || !*tool.Annotations.ReadOnlyHint,
+ "Tool %s is not read-only but should be", tool.Name)
+ s.Falsef(tool.Annotations.DestructiveHint != nil && *tool.Annotations.DestructiveHint,
+ "Tool %s is destructive but should not be in read-only mode", tool.Name)
+ }
})
}
-func TestDisableDestructive(t *testing.T) {
- disableDestructiveServer := func(c *mcpContext) { c.staticConfig = &config.StaticConfig{DisableDestructive: true} }
- testCaseWithContext(t, &mcpContext{before: disableDestructiveServer}, func(c *mcpContext) {
- tools, err := c.mcpClient.ListTools(c.ctx, mcp.ListToolsRequest{})
- t.Run("ListTools returns tools", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call ListTools failed %v", err)
- }
- })
- t.Run("ListTools does not return destructive tools", func(t *testing.T) {
- for _, tool := range tools.Tools {
- if tool.Annotations.DestructiveHint != nil && *tool.Annotations.DestructiveHint {
- t.Errorf("Tool %s is destructive but should not be", tool.Name)
- }
- }
- })
+func (s *McpToolProcessingSuite) TestDisableDestructive() {
+ s.Require().NoError(toml.Unmarshal([]byte(`
+ disable_destructive = true
+ `), s.Cfg), "Expected to parse disable destructive server config")
+ s.InitMcpClient()
+
+ tools, err := s.ListTools(s.T().Context(), mcp.ListToolsRequest{})
+ s.Require().NotNil(tools)
+
+ s.Run("ListTools returns tools", func() {
+ s.NoError(err, "call ListTools failed")
+ s.NotNilf(tools, "list tools failed")
+ })
+
+ s.Run("ListTools does not return destructive tools", func() {
+ for _, tool := range tools.Tools {
+ s.Falsef(tool.Annotations.DestructiveHint != nil && *tool.Annotations.DestructiveHint,
+ "Tool %s is destructive but should not be in disable_destructive mode", tool.Name)
+ }
})
}
-func TestEnabledTools(t *testing.T) {
- enabledToolsServer := test.Must(config.ReadToml([]byte(`
+func (s *McpToolProcessingSuite) TestEnabledTools() {
+ s.Require().NoError(toml.Unmarshal([]byte(`
enabled_tools = [ "namespaces_list", "events_list" ]
- `)))
- testCaseWithContext(t, &mcpContext{staticConfig: enabledToolsServer}, func(c *mcpContext) {
- tools, err := c.mcpClient.ListTools(c.ctx, mcp.ListToolsRequest{})
- t.Run("ListTools returns tools", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call ListTools failed %v", err)
- }
- })
- t.Run("ListTools returns only explicitly enabled tools", func(t *testing.T) {
- if len(tools.Tools) != 2 {
- t.Fatalf("ListTools should return 2 tools, got %d", len(tools.Tools))
- }
- for _, tool := range tools.Tools {
- if tool.Name != "namespaces_list" && tool.Name != "events_list" {
- t.Errorf("Tool %s is not enabled but should be", tool.Name)
- }
- }
- })
+ `), s.Cfg), "Expected to parse enabled tools server config")
+ s.InitMcpClient()
+
+ tools, err := s.ListTools(s.T().Context(), mcp.ListToolsRequest{})
+ s.Require().NotNil(tools)
+
+ s.Run("ListTools returns tools", func() {
+ s.NoError(err, "call ListTools failed")
+ s.NotNilf(tools, "list tools failed")
})
-}
-func TestDisabledTools(t *testing.T) {
- testCaseWithContext(t, &mcpContext{
- staticConfig: &config.StaticConfig{
- DisabledTools: []string{"namespaces_list", "events_list"},
- },
- }, func(c *mcpContext) {
- tools, err := c.mcpClient.ListTools(c.ctx, mcp.ListToolsRequest{})
- t.Run("ListTools returns tools", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call ListTools failed %v", err)
- }
- })
- t.Run("ListTools does not return disabled tools", func(t *testing.T) {
- for _, tool := range tools.Tools {
- if tool.Name == "namespaces_list" || tool.Name == "events_list" {
- t.Errorf("Tool %s is not disabled but should be", tool.Name)
- }
- }
- })
+ s.Run("ListTools returns only explicitly enabled tools", func() {
+ s.Len(tools.Tools, 2, "ListTools should return exactly 2 tools")
+ for _, tool := range tools.Tools {
+ s.Falsef(tool.Name != "namespaces_list" && tool.Name != "events_list",
+ "Tool %s is not enabled but should be", tool.Name)
+ }
})
}
-func TestToolCallLogging(t *testing.T) {
- testCaseWithContext(t, &mcpContext{logLevel: 5}, func(c *mcpContext) {
- _, _ = c.callTool("configuration_view", map[string]interface{}{
- "minified": false,
- })
- t.Run("Logs tool name", func(t *testing.T) {
- expectedLog := "mcp tool call: configuration_view("
- if !strings.Contains(c.logBuffer.String(), expectedLog) {
- t.Errorf("Expected log to contain '%s', got: %s", expectedLog, c.logBuffer.String())
- }
- })
- t.Run("Logs tool call arguments", func(t *testing.T) {
- expected := `"mcp tool call: configuration_view\((.+)\)"`
- m := regexp.MustCompile(expected).FindStringSubmatch(c.logBuffer.String())
- if len(m) != 2 {
- t.Fatalf("Expected log entry to contain arguments, got %s", c.logBuffer.String())
- }
- if m[1] != "map[minified:false]" {
- t.Errorf("Expected log arguments to be 'map[minified:false]', got %s", m[1])
- }
- })
+func (s *McpToolProcessingSuite) TestDisabledTools() {
+ s.Require().NoError(toml.Unmarshal([]byte(`
+ disabled_tools = [ "namespaces_list", "events_list" ]
+ `), s.Cfg), "Expected to parse disabled tools server config")
+ s.InitMcpClient()
+
+ tools, err := s.ListTools(s.T().Context(), mcp.ListToolsRequest{})
+ s.Require().NotNil(tools)
+
+ s.Run("ListTools returns tools", func() {
+ s.NoError(err, "call ListTools failed")
+ s.NotNilf(tools, "list tools failed")
})
- before := func(c *mcpContext) {
- c.clientOptions = append(c.clientOptions, transport.WithHeaders(map[string]string{
- "Accept-Encoding": "gzip",
- "Authorization": "Bearer should-not-be-logged",
- "authorization": "Bearer should-not-be-logged",
- "a-loggable-header": "should-be-logged",
- }))
- }
- testCaseWithContext(t, &mcpContext{logLevel: 7, before: before}, func(c *mcpContext) {
- _, _ = c.callTool("configuration_view", map[string]interface{}{
- "minified": false,
- })
- t.Run("Logs tool call headers", func(t *testing.T) {
- expectedLog := "mcp tool call headers: A-Loggable-Header: should-be-logged"
- if !strings.Contains(c.logBuffer.String(), expectedLog) {
- t.Errorf("Expected log to contain '%s', got: %s", expectedLog, c.logBuffer.String())
- }
- })
- sensitiveHeaders := []string{
- "Authorization:",
- // TODO: Add more sensitive headers as needed
+
+ s.Run("ListTools does not return disabled tools", func() {
+ for _, tool := range tools.Tools {
+ s.Falsef(tool.Name == "namespaces_list" || tool.Name == "events_list",
+ "Tool %s is not disabled but should be", tool.Name)
}
- t.Run("Does not log sensitive headers", func(t *testing.T) {
- for _, header := range sensitiveHeaders {
- if strings.Contains(c.logBuffer.String(), header) {
- t.Errorf("Log should not contain sensitive header '%s', got: %s", header, c.logBuffer.String())
- }
- }
- })
- t.Run("Does not log sensitive header values", func(t *testing.T) {
- if strings.Contains(c.logBuffer.String(), "should-not-be-logged") {
- t.Errorf("Log should not contain sensitive header value 'should-not-be-logged', got: %s", c.logBuffer.String())
- }
- })
})
}
+
+func TestMcpToolProcessing(t *testing.T) {
+ suite.Run(t, new(McpToolProcessingSuite))
+}
diff --git a/pkg/mcp/mcp_watch_test.go b/pkg/mcp/mcp_watch_test.go
new file mode 100644
index 00000000..68287279
--- /dev/null
+++ b/pkg/mcp/mcp_watch_test.go
@@ -0,0 +1,103 @@
+package mcp
+
+import (
+ "context"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/containers/kubernetes-mcp-server/internal/test"
+ "github.com/mark3labs/mcp-go/mcp"
+ "github.com/stretchr/testify/suite"
+)
+
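+// WatchKubeConfigSuite verifies that changes to the watched kubeconfig file are detected and notified to MCP clients.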
+type WatchKubeConfigSuite struct {
+ BaseMcpSuite
+ mockServer *test.MockServer
+}
+
+func (s *WatchKubeConfigSuite) SetupTest() {
+ s.BaseMcpSuite.SetupTest()
+ s.mockServer = test.NewMockServer()
+ s.Cfg.KubeConfig = s.mockServer.KubeconfigFile(s.T())
+}
+
+func (s *WatchKubeConfigSuite) TearDownTest() {
+ s.BaseMcpSuite.TearDownTest()
+ if s.mockServer != nil {
+ s.mockServer.Close()
+ }
+}
+
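+// WriteKubeconfig appends a newline to the kubeconfig file to trigger the kubeconfig watcher.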
+func (s *WatchKubeConfigSuite) WriteKubeconfig() {
+ f, _ := os.OpenFile(s.Cfg.KubeConfig, os.O_APPEND|os.O_WRONLY, 0644)
+ _, _ = f.WriteString("\n")
+ _ = f.Close()
+}
+
+// WaitForNotification waits for an MCP server notification or fails the test after a timeout
+func (s *WatchKubeConfigSuite) WaitForNotification() *mcp.JSONRPCNotification {
+ withTimeout, cancel := context.WithTimeout(s.T().Context(), 5*time.Second)
+ defer cancel()
+ var notification *mcp.JSONRPCNotification
+ s.OnNotification(func(n mcp.JSONRPCNotification) {
+ notification = &n
+ })
+ for notification == nil {
+ select {
+ case <-withTimeout.Done():
+ s.FailNow("timeout waiting for WatchKubeConfig notification")
+ default:
+ time.Sleep(100 * time.Millisecond)
+ }
+ }
+ return notification
+}
+
+func (s *WatchKubeConfigSuite) TestNotifiesToolsChange() {
+ // Given
+ s.InitMcpClient()
+ // When
+ s.WriteKubeconfig()
+ notification := s.WaitForNotification()
+ // Then
+ s.NotNil(notification, "WatchKubeConfig did not notify")
+ s.Equal("notifications/tools/list_changed", notification.Method, "WatchKubeConfig did not notify tools change")
+}
+
+func (s *WatchKubeConfigSuite) TestClearsNoLongerAvailableTools() {
+ s.mockServer.Handle(&test.InOpenShiftHandler{})
+ s.InitMcpClient()
+
+ s.Run("OpenShift tool is available", func() {
+ tools, err := s.ListTools(s.T().Context(), mcp.ListToolsRequest{})
+ s.Require().NoError(err, "call ListTools failed")
+ s.Require().NotNil(tools, "list tools failed")
+ var found bool
+ for _, tool := range tools.Tools {
+ if tool.Name == "projects_list" {
+ found = true
+ break
+ }
+ }
+ s.Truef(found, "expected OpenShift tool to be available")
+ })
+
+ s.Run("OpenShift tool is removed after kubeconfig change", func() {
+ // Reload Config without OpenShift
+ s.mockServer.ResetHandlers()
+ s.WriteKubeconfig()
+ s.WaitForNotification()
+
+ tools, err := s.ListTools(s.T().Context(), mcp.ListToolsRequest{})
+ s.Require().NoError(err, "call ListTools failed")
+ s.Require().NotNil(tools, "list tools failed")
+ for _, tool := range tools.Tools {
+ s.Require().Falsef(tool.Name == "projects_list", "expected OpenShift tool to be removed")
+ }
+ })
+}
+
+func TestWatchKubeConfig(t *testing.T) {
+ suite.Run(t, new(WatchKubeConfigSuite))
+}
diff --git a/pkg/mcp/namespaces_test.go b/pkg/mcp/namespaces_test.go
index a0a6ff23..25565512 100644
--- a/pkg/mcp/namespaces_test.go
+++ b/pkg/mcp/namespaces_test.go
@@ -13,9 +13,6 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"sigs.k8s.io/yaml"
-
- "github.com/containers/kubernetes-mcp-server/internal/test"
- "github.com/containers/kubernetes-mcp-server/pkg/config"
)
type NamespacesSuite struct {
@@ -108,68 +105,67 @@ func (s *NamespacesSuite) TestNamespacesListAsTable() {
})
}
-func TestNamespaces(t *testing.T) {
- suite.Run(t, new(NamespacesSuite))
-}
+func (s *NamespacesSuite) TestProjectsListInOpenShift() {
+ s.Require().NoError(EnvTestInOpenShift(s.T().Context()), "Expected to configure test for OpenShift")
+ s.T().Cleanup(func() {
+ s.Require().NoError(EnvTestInOpenShiftClear(s.T().Context()), "Expected to clear OpenShift test configuration")
+ })
+ s.InitMcpClient()
-func TestProjectsListInOpenShift(t *testing.T) {
- testCaseWithContext(t, &mcpContext{before: inOpenShift, after: inOpenShiftClear}, func(c *mcpContext) {
+ s.Run("projects_list returns project list in OpenShift", func() {
dynamicClient := dynamic.NewForConfigOrDie(envTestRestConfig)
_, _ = dynamicClient.Resource(schema.GroupVersionResource{Group: "project.openshift.io", Version: "v1", Resource: "projects"}).
- Create(c.ctx, &unstructured.Unstructured{Object: map[string]interface{}{
+ Create(s.T().Context(), &unstructured.Unstructured{Object: map[string]interface{}{
"apiVersion": "project.openshift.io/v1",
"kind": "Project",
"metadata": map[string]interface{}{
"name": "an-openshift-project",
},
}}, metav1.CreateOptions{})
- toolResult, err := c.callTool("projects_list", map[string]interface{}{})
- t.Run("projects_list returns project list", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- }
- if toolResult.IsError {
- t.Fatalf("call tool failed")
- }
+ toolResult, err := s.CallTool("projects_list", map[string]interface{}{})
+ s.Run("no error", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(toolResult.IsError, "call tool failed")
})
var decoded []unstructured.Unstructured
err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded)
- t.Run("projects_list has yaml content", func(t *testing.T) {
- if err != nil {
- t.Fatalf("invalid tool result content %v", err)
- }
+ s.Run("has yaml content", func() {
+ s.Nilf(err, "invalid tool result content %v", err)
})
- t.Run("projects_list returns at least 1 items", func(t *testing.T) {
- if len(decoded) < 1 {
- t.Errorf("invalid project count, expected at least 1, got %v", len(decoded))
- }
+ s.Run("returns at least 1 item", func() {
+ s.GreaterOrEqualf(len(decoded), 1, "invalid project count, expected at least 1, got %v", len(decoded))
idx := slices.IndexFunc(decoded, func(ns unstructured.Unstructured) bool {
return ns.GetName() == "an-openshift-project"
})
- if idx == -1 {
- t.Errorf("namespace %s not found in the list", "an-openshift-project")
- }
+ s.NotEqualf(-1, idx, "namespace %s not found in the list", "an-openshift-project")
})
})
}
-func TestProjectsListInOpenShiftDenied(t *testing.T) {
- deniedResourcesServer := test.Must(config.ReadToml([]byte(`
+func (s *NamespacesSuite) TestProjectsListInOpenShiftDenied() {
+ s.Require().NoError(toml.Unmarshal([]byte(`
denied_resources = [ { group = "project.openshift.io", version = "v1" } ]
- `)))
- testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer, before: inOpenShift, after: inOpenShiftClear}, func(c *mcpContext) {
- c.withEnvTest()
- projectsList, _ := c.callTool("projects_list", map[string]interface{}{})
- t.Run("projects_list has error", func(t *testing.T) {
- if !projectsList.IsError {
- t.Fatalf("call tool should fail")
- }
+ `), s.Cfg), "Expected to parse denied resources config")
+ s.Require().NoError(EnvTestInOpenShift(s.T().Context()), "Expected to configure test for OpenShift")
+ s.T().Cleanup(func() {
+ s.Require().NoError(EnvTestInOpenShiftClear(s.T().Context()), "Expected to clear OpenShift test configuration")
+ })
+ s.InitMcpClient()
+
+ s.Run("projects_list (denied)", func() {
+ projectsList, err := s.CallTool("projects_list", map[string]interface{}{})
+ s.Run("has error", func() {
+ s.Truef(projectsList.IsError, "call tool should fail")
+ s.Nilf(err, "call tool should not return error object")
})
- t.Run("projects_list describes denial", func(t *testing.T) {
+ s.Run("describes denial", func() {
expectedMessage := "failed to list projects: resource not allowed: project.openshift.io/v1, Kind=Project"
- if projectsList.Content[0].(mcp.TextContent).Text != expectedMessage {
- t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, projectsList.Content[0].(mcp.TextContent).Text)
- }
+ s.Equalf(expectedMessage, projectsList.Content[0].(mcp.TextContent).Text,
+ "expected descriptive error '%s', got %v", expectedMessage, projectsList.Content[0].(mcp.TextContent).Text)
})
})
}
+
+func TestNamespaces(t *testing.T) {
+ suite.Run(t, new(NamespacesSuite))
+}
diff --git a/pkg/mcp/nodes_test.go b/pkg/mcp/nodes_test.go
new file mode 100644
index 00000000..62ac55e9
--- /dev/null
+++ b/pkg/mcp/nodes_test.go
@@ -0,0 +1,336 @@
+package mcp
+
+import (
+ "net/http"
+ "strconv"
+ "testing"
+
+ "github.com/BurntSushi/toml"
+ "github.com/containers/kubernetes-mcp-server/internal/test"
+ "github.com/mark3labs/mcp-go/mcp"
+ "github.com/stretchr/testify/suite"
+)
+
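+// NodesSuite exercises the nodes_log and nodes_stats_summary tools against a mock Kubernetes API server.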
+type NodesSuite struct {
+ BaseMcpSuite
+ mockServer *test.MockServer
+}
+
+func (s *NodesSuite) SetupTest() {
+ s.BaseMcpSuite.SetupTest()
+ s.mockServer = test.NewMockServer()
+ s.Cfg.KubeConfig = s.mockServer.KubeconfigFile(s.T())
+}
+
+func (s *NodesSuite) TearDownTest() {
+ s.BaseMcpSuite.TearDownTest()
+ if s.mockServer != nil {
+ s.mockServer.Close()
+ }
+}
+
+func (s *NodesSuite) TestNodesLog() {
+ s.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ // Get Node response
+ if req.URL.Path == "/api/v1/nodes/existing-node" {
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ _, _ = w.Write([]byte(`{
+ "apiVersion": "v1",
+ "kind": "Node",
+ "metadata": {
+ "name": "existing-node"
+ }
+ }`))
+ return
+ }
+ // Get Proxy Logs
+ if req.URL.Path == "/api/v1/nodes/existing-node/proxy/logs" {
+ w.Header().Set("Content-Type", "text/plain")
+ query := req.URL.Query().Get("query")
+ var logContent string
+ switch query {
+ case "/empty.log":
+ logContent = ""
+ case "/kubelet.log":
+ logContent = "Line 1\nLine 2\nLine 3\nLine 4\nLine 5\n"
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ return
+ }
+			// Only tail the log when a positive tailLines value is requested
+			if tailLines, err := strconv.Atoi(req.URL.Query().Get("tailLines")); err == nil && tailLines > 0 {
+				logContent = "Line 4\nLine 5\n"
+			}
+ w.WriteHeader(http.StatusOK)
+ _, _ = w.Write([]byte(logContent))
+ return
+ }
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ s.InitMcpClient()
+ s.Run("nodes_log(name=nil)", func() {
+ toolResult, err := s.CallTool("nodes_log", map[string]interface{}{})
+ s.Require().NotNil(toolResult, "toolResult should not be nil")
+ s.Run("has error", func() {
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Nilf(err, "call tool should not return error object")
+ })
+ s.Run("describes missing name", func() {
+ expectedMessage := "failed to get node log, missing argument name"
+ s.Equalf(expectedMessage, toolResult.Content[0].(mcp.TextContent).Text,
+ "expected descriptive error '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ })
+ s.Run("nodes_log(name=existing-node, query=nil)", func() {
+ toolResult, err := s.CallTool("nodes_log", map[string]interface{}{
+ "name": "existing-node",
+ })
+ s.Require().NotNil(toolResult, "toolResult should not be nil")
+ s.Run("has error", func() {
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Nilf(err, "call tool should not return error object")
+ })
+		s.Run("describes missing query", func() {
+ expectedMessage := "failed to get node log, missing argument query"
+ s.Equalf(expectedMessage, toolResult.Content[0].(mcp.TextContent).Text,
+ "expected descriptive error '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ })
+ s.Run("nodes_log(name=inexistent-node, query=/kubelet.log)", func() {
+ toolResult, err := s.CallTool("nodes_log", map[string]interface{}{
+ "name": "inexistent-node",
+ "query": "/kubelet.log",
+ })
+ s.Require().NotNil(toolResult, "toolResult should not be nil")
+ s.Run("has error", func() {
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Nilf(err, "call tool should not return error object")
+ })
+ s.Run("describes missing node", func() {
+ expectedMessage := "failed to get node log for inexistent-node: failed to get node inexistent-node: the server could not find the requested resource (get nodes inexistent-node)"
+ s.Equalf(expectedMessage, toolResult.Content[0].(mcp.TextContent).Text,
+ "expected descriptive error '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ })
+ s.Run("nodes_log(name=existing-node, query=/missing.log)", func() {
+ toolResult, err := s.CallTool("nodes_log", map[string]interface{}{
+ "name": "existing-node",
+ "query": "/missing.log",
+ })
+ s.Require().NotNil(toolResult, "toolResult should not be nil")
+ s.Run("has error", func() {
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Nilf(err, "call tool should not return error object")
+ })
+ s.Run("describes missing log file", func() {
+ expectedMessage := "failed to get node log for existing-node: failed to get node logs: the server could not find the requested resource"
+ s.Equalf(expectedMessage, toolResult.Content[0].(mcp.TextContent).Text,
+ "expected descriptive error '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ })
+ s.Run("nodes_log(name=existing-node, query=/empty.log)", func() {
+ toolResult, err := s.CallTool("nodes_log", map[string]interface{}{
+ "name": "existing-node",
+ "query": "/empty.log",
+ })
+ s.Require().NotNil(toolResult, "toolResult should not be nil")
+ s.Run("no error", func() {
+ s.Falsef(toolResult.IsError, "call tool should succeed")
+ s.Nilf(err, "call tool should not return error object")
+ })
+ s.Run("describes empty log", func() {
+ expectedMessage := "The node existing-node has not logged any message yet or the log file is empty"
+ s.Equalf(expectedMessage, toolResult.Content[0].(mcp.TextContent).Text,
+ "expected descriptive message '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ })
+ s.Run("nodes_log(name=existing-node, query=/kubelet.log)", func() {
+ toolResult, err := s.CallTool("nodes_log", map[string]interface{}{
+ "name": "existing-node",
+ "query": "/kubelet.log",
+ })
+ s.Require().NotNil(toolResult, "toolResult should not be nil")
+ s.Run("no error", func() {
+ s.Falsef(toolResult.IsError, "call tool should succeed")
+ s.Nilf(err, "call tool should not return error object")
+ })
+ s.Run("returns full log", func() {
+ expectedMessage := "Line 1\nLine 2\nLine 3\nLine 4\nLine 5\n"
+ s.Equalf(expectedMessage, toolResult.Content[0].(mcp.TextContent).Text,
+ "expected log content '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ })
+ for _, tailCase := range []interface{}{2, int64(2), float64(2)} {
+ s.Run("nodes_log(name=existing-node, query=/kubelet.log, tailLines=2)", func() {
+ toolResult, err := s.CallTool("nodes_log", map[string]interface{}{
+ "name": "existing-node",
+ "query": "/kubelet.log",
+ "tailLines": tailCase,
+ })
+ s.Require().NotNil(toolResult, "toolResult should not be nil")
+ s.Run("no error", func() {
+ s.Falsef(toolResult.IsError, "call tool should succeed")
+ s.Nilf(err, "call tool should not return error object")
+ })
+ s.Run("returns tail log", func() {
+ expectedMessage := "Line 4\nLine 5\n"
+ s.Equalf(expectedMessage, toolResult.Content[0].(mcp.TextContent).Text,
+ "expected log content '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ })
+	}
+	s.Run("nodes_log(name=existing-node, query=/kubelet.log, tailLines=-1)", func() {
+		toolResult, err := s.CallTool("nodes_log", map[string]interface{}{
+			"name":      "existing-node",
+			"query":     "/kubelet.log",
+			"tailLines": -1,
+		})
+		s.Require().NotNil(toolResult, "toolResult should not be nil")
+		s.Run("no error", func() {
+			s.Falsef(toolResult.IsError, "call tool should succeed")
+			s.Nilf(err, "call tool should not return error object")
+		})
+		s.Run("returns full log", func() {
+			expectedMessage := "Line 1\nLine 2\nLine 3\nLine 4\nLine 5\n"
+			s.Equalf(expectedMessage, toolResult.Content[0].(mcp.TextContent).Text,
+				"expected log content '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text)
+		})
+	})
+}
+
+func (s *NodesSuite) TestNodesLogDenied() {
+ s.Require().NoError(toml.Unmarshal([]byte(`
+ denied_resources = [ { version = "v1", kind = "Node" } ]
+ `), s.Cfg), "Expected to parse denied resources config")
+ s.InitMcpClient()
+ s.Run("nodes_log (denied)", func() {
+ toolResult, err := s.CallTool("nodes_log", map[string]interface{}{
+ "name": "does-not-matter",
+ "query": "/does-not-matter-either.log",
+ })
+ s.Require().NotNil(toolResult, "toolResult should not be nil")
+ s.Run("has error", func() {
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Nilf(err, "call tool should not return error object")
+ })
+ s.Run("describes denial", func() {
+ expectedMessage := "failed to get node log for does-not-matter: resource not allowed: /v1, Kind=Node"
+ s.Equalf(expectedMessage, toolResult.Content[0].(mcp.TextContent).Text,
+ "expected descriptive error '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ })
+}
+
+func (s *NodesSuite) TestNodesStatsSummary() {
+ s.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ // Get Node response
+ if req.URL.Path == "/api/v1/nodes/existing-node" {
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ _, _ = w.Write([]byte(`{
+ "apiVersion": "v1",
+ "kind": "Node",
+ "metadata": {
+ "name": "existing-node"
+ }
+ }`))
+ return
+ }
+ // Get Stats Summary response
+ if req.URL.Path == "/api/v1/nodes/existing-node/proxy/stats/summary" {
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ _, _ = w.Write([]byte(`{
+ "node": {
+ "nodeName": "existing-node",
+ "cpu": {
+ "time": "2025-10-27T00:00:00Z",
+ "usageNanoCores": 1000000000,
+ "usageCoreNanoSeconds": 5000000000
+ },
+ "memory": {
+ "time": "2025-10-27T00:00:00Z",
+ "availableBytes": 8000000000,
+ "usageBytes": 4000000000,
+ "workingSetBytes": 3500000000
+ }
+ },
+ "pods": []
+ }`))
+ return
+ }
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ s.InitMcpClient()
+ s.Run("nodes_stats_summary(name=nil)", func() {
+ toolResult, err := s.CallTool("nodes_stats_summary", map[string]interface{}{})
+ s.Require().NotNil(toolResult, "toolResult should not be nil")
+ s.Run("has error", func() {
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Nilf(err, "call tool should not return error object")
+ })
+ s.Run("describes missing name", func() {
+ expectedMessage := "failed to get node stats summary, missing argument name"
+ s.Equalf(expectedMessage, toolResult.Content[0].(mcp.TextContent).Text,
+ "expected descriptive error '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ })
+ s.Run("nodes_stats_summary(name=inexistent-node)", func() {
+ toolResult, err := s.CallTool("nodes_stats_summary", map[string]interface{}{
+ "name": "inexistent-node",
+ })
+ s.Require().NotNil(toolResult, "toolResult should not be nil")
+ s.Run("has error", func() {
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Nilf(err, "call tool should not return error object")
+ })
+ s.Run("describes missing node", func() {
+ expectedMessage := "failed to get node stats summary for inexistent-node: failed to get node inexistent-node: the server could not find the requested resource (get nodes inexistent-node)"
+ s.Equalf(expectedMessage, toolResult.Content[0].(mcp.TextContent).Text,
+ "expected descriptive error '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ })
+ s.Run("nodes_stats_summary(name=existing-node)", func() {
+ toolResult, err := s.CallTool("nodes_stats_summary", map[string]interface{}{
+ "name": "existing-node",
+ })
+ s.Require().NotNil(toolResult, "toolResult should not be nil")
+ s.Run("no error", func() {
+ s.Falsef(toolResult.IsError, "call tool should succeed")
+ s.Nilf(err, "call tool should not return error object")
+ })
+ s.Run("returns stats summary", func() {
+ content := toolResult.Content[0].(mcp.TextContent).Text
+ s.Containsf(content, "existing-node", "expected stats to contain node name, got %v", content)
+ s.Containsf(content, "usageNanoCores", "expected stats to contain CPU metrics, got %v", content)
+ s.Containsf(content, "usageBytes", "expected stats to contain memory metrics, got %v", content)
+ })
+ })
+}
+
+func (s *NodesSuite) TestNodesStatsSummaryDenied() {
+ s.Require().NoError(toml.Unmarshal([]byte(`
+ denied_resources = [ { version = "v1", kind = "Node" } ]
+ `), s.Cfg), "Expected to parse denied resources config")
+ s.InitMcpClient()
+ s.Run("nodes_stats_summary (denied)", func() {
+ toolResult, err := s.CallTool("nodes_stats_summary", map[string]interface{}{
+ "name": "does-not-matter",
+ })
+ s.Require().NotNil(toolResult, "toolResult should not be nil")
+ s.Run("has error", func() {
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Nilf(err, "call tool should not return error object")
+ })
+ s.Run("describes denial", func() {
+ expectedMessage := "failed to get node stats summary for does-not-matter: resource not allowed: /v1, Kind=Node"
+ s.Equalf(expectedMessage, toolResult.Content[0].(mcp.TextContent).Text,
+ "expected descriptive error '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ })
+}
+
+func TestNodes(t *testing.T) {
+ suite.Run(t, new(NodesSuite))
+}
diff --git a/pkg/mcp/nodes_top_test.go b/pkg/mcp/nodes_top_test.go
new file mode 100644
index 00000000..23ae9945
--- /dev/null
+++ b/pkg/mcp/nodes_top_test.go
@@ -0,0 +1,248 @@
+package mcp
+
+import (
+ "net/http"
+ "testing"
+
+ "github.com/BurntSushi/toml"
+ "github.com/containers/kubernetes-mcp-server/internal/test"
+ "github.com/mark3labs/mcp-go/mcp"
+ "github.com/stretchr/testify/suite"
+)
+
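+// NodesTopSuite exercises the nodes_top tool against a mock Kubernetes API server that can optionally expose metrics.k8s.io endpoints.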
+type NodesTopSuite struct {
+ BaseMcpSuite
+ mockServer *test.MockServer
+}
+
+func (s *NodesTopSuite) SetupTest() {
+ s.BaseMcpSuite.SetupTest()
+ s.mockServer = test.NewMockServer()
+ s.Cfg.KubeConfig = s.mockServer.KubeconfigFile(s.T())
+ s.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ // Request Performed by DiscoveryClient to Kube API (Get API Groups legacy -core-)
+ if req.URL.Path == "/api" {
+ _, _ = w.Write([]byte(`{"kind":"APIVersions","versions":[],"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0"}]}`))
+ return
+ }
+ }))
+}
+
+func (s *NodesTopSuite) TearDownTest() {
+ s.BaseMcpSuite.TearDownTest()
+ if s.mockServer != nil {
+ s.mockServer.Close()
+ }
+}
+
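+// WithMetricsServer registers handlers that advertise the metrics.k8s.io/v1beta1 API group and its NodeMetrics resource on the mock server.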
+func (s *NodesTopSuite) WithMetricsServer() {
+ s.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ // Request Performed by DiscoveryClient to Kube API (Get API Groups)
+ if req.URL.Path == "/apis" {
+ _, _ = w.Write([]byte(`{"kind":"APIGroupList","apiVersion":"v1","groups":[{"name":"metrics.k8s.io","versions":[{"groupVersion":"metrics.k8s.io/v1beta1","version":"v1beta1"}],"preferredVersion":{"groupVersion":"metrics.k8s.io/v1beta1","version":"v1beta1"}}]}`))
+ return
+ }
+ // Request Performed by DiscoveryClient to Kube API (Get API Resources)
+ if req.URL.Path == "/apis/metrics.k8s.io/v1beta1" {
+ _, _ = w.Write([]byte(`{"kind":"APIResourceList","apiVersion":"v1","groupVersion":"metrics.k8s.io/v1beta1","resources":[{"name":"nodes","singularName":"","namespaced":false,"kind":"NodeMetrics","verbs":["get","list"]}]}`))
+ return
+ }
+ }))
+}
+
+func (s *NodesTopSuite) TestNodesTop() {
+ s.WithMetricsServer()
+ s.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ // List Nodes
+ if req.URL.Path == "/api/v1/nodes" {
+ _, _ = w.Write([]byte(`{
+ "apiVersion": "v1",
+ "kind": "NodeList",
+ "items": [
+ {
+ "metadata": {
+ "name": "node-1",
+ "labels": {
+ "node-role.kubernetes.io/worker": ""
+ }
+ },
+ "status": {
+ "allocatable": {
+ "cpu": "4",
+ "memory": "16Gi"
+ },
+ "nodeInfo": {
+ "swap": {
+ "capacity": 0
+ }
+ }
+ }
+ },
+ {
+ "metadata": {
+ "name": "node-2",
+ "labels": {
+ "node-role.kubernetes.io/worker": ""
+ }
+ },
+ "status": {
+ "allocatable": {
+ "cpu": "4",
+ "memory": "16Gi"
+ },
+ "nodeInfo": {
+ "swap": {
+ "capacity": 0
+ }
+ }
+ }
+ }
+ ]
+ }`))
+ return
+ }
+ // Get NodeMetrics
+ if req.URL.Path == "/apis/metrics.k8s.io/v1beta1/nodes" {
+ _, _ = w.Write([]byte(`{
+ "apiVersion": "metrics.k8s.io/v1beta1",
+ "kind": "NodeMetricsList",
+ "items": [
+ {
+ "metadata": {
+ "name": "node-1"
+ },
+ "timestamp": "2025-10-29T09:00:00Z",
+ "window": "30s",
+ "usage": {
+ "cpu": "500m",
+ "memory": "2Gi"
+ }
+ },
+ {
+ "metadata": {
+ "name": "node-2"
+ },
+ "timestamp": "2025-10-29T09:00:00Z",
+ "window": "30s",
+ "usage": {
+ "cpu": "1000m",
+ "memory": "4Gi"
+ }
+ }
+ ]
+ }`))
+ return
+ }
+ // Get specific NodeMetrics
+ if req.URL.Path == "/apis/metrics.k8s.io/v1beta1/nodes/node-1" {
+ _, _ = w.Write([]byte(`{
+ "apiVersion": "metrics.k8s.io/v1beta1",
+ "kind": "NodeMetrics",
+ "metadata": {
+ "name": "node-1"
+ },
+ "timestamp": "2025-10-29T09:00:00Z",
+ "window": "30s",
+ "usage": {
+ "cpu": "500m",
+ "memory": "2Gi"
+ }
+ }`))
+ return
+ }
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ s.InitMcpClient()
+
+ s.Run("nodes_top() - all nodes", func() {
+ toolResult, err := s.CallTool("nodes_top", map[string]interface{}{})
+ s.Require().NotNil(toolResult, "toolResult should not be nil")
+ s.Run("no error", func() {
+ s.Falsef(toolResult.IsError, "call tool should succeed")
+ s.Nilf(err, "call tool should not return error object")
+ })
+ s.Run("returns metrics for all nodes", func() {
+ content := toolResult.Content[0].(mcp.TextContent).Text
+ s.Contains(content, "node-1", "expected metrics to contain node-1")
+ s.Contains(content, "node-2", "expected metrics to contain node-2")
+ s.Contains(content, "CPU(cores)", "expected header with CPU column")
+ s.Contains(content, "MEMORY(bytes)", "expected header with MEMORY column")
+ })
+ })
+
+ s.Run("nodes_top(name=node-1) - specific node", func() {
+ toolResult, err := s.CallTool("nodes_top", map[string]interface{}{
+ "name": "node-1",
+ })
+ s.Require().NotNil(toolResult, "toolResult should not be nil")
+ s.Run("no error", func() {
+ s.Falsef(toolResult.IsError, "call tool should succeed")
+ s.Nilf(err, "call tool should not return error object")
+ })
+ s.Run("returns metrics for specific node", func() {
+ content := toolResult.Content[0].(mcp.TextContent).Text
+ s.Contains(content, "node-1", "expected metrics to contain node-1")
+ s.Contains(content, "500m", "expected CPU usage of 500m")
+ s.Contains(content, "2048Mi", "expected memory usage of 2048Mi")
+ })
+ })
+
+ s.Run("nodes_top(label_selector=node-role.kubernetes.io/worker=)", func() {
+ toolResult, err := s.CallTool("nodes_top", map[string]interface{}{
+ "label_selector": "node-role.kubernetes.io/worker=",
+ })
+ s.Require().NotNil(toolResult, "toolResult should not be nil")
+ s.Run("no error", func() {
+ s.Falsef(toolResult.IsError, "call tool should succeed")
+ s.Nilf(err, "call tool should not return error object")
+ })
+ s.Run("returns metrics for filtered nodes", func() {
+ content := toolResult.Content[0].(mcp.TextContent).Text
+ s.Contains(content, "node-1", "expected metrics to contain node-1")
+ s.Contains(content, "node-2", "expected metrics to contain node-2")
+ })
+ })
+}
+
+func (s *NodesTopSuite) TestNodesTopMetricsUnavailable() {
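+ // WithMetricsServer is intentionally not called, so the metrics.k8s.io group is not discoverable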
+ s.InitMcpClient()
+
+ s.Run("nodes_top() - metrics unavailable", func() {
+ toolResult, err := s.CallTool("nodes_top", map[string]interface{}{})
+ s.Require().NotNil(toolResult, "toolResult should not be nil")
+ s.Run("has error", func() {
+ s.Truef(toolResult.IsError, "call tool should fail when metrics unavailable")
+ s.Nilf(err, "call tool should not return error object")
+ })
+ s.Run("describes metrics unavailable", func() {
+ content := toolResult.Content[0].(mcp.TextContent).Text
+ s.Contains(content, "failed to get nodes top", "expected error message about failing to get nodes top")
+ })
+ })
+}
+
+func (s *NodesTopSuite) TestNodesTopDenied() {
+ s.Require().NoError(toml.Unmarshal([]byte(`
+ denied_resources = [ { group = "metrics.k8s.io", version = "v1beta1" } ]
+ `), s.Cfg), "Expected to parse denied resources config")
+ s.WithMetricsServer()
+ s.InitMcpClient()
+ s.Run("nodes_top (denied)", func() {
+ toolResult, err := s.CallTool("nodes_top", map[string]interface{}{})
+ s.Require().NotNil(toolResult, "toolResult should not be nil")
+ s.Run("has error", func() {
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Nilf(err, "call tool should not return error object")
+ })
+ s.Run("describes denial", func() {
+ expectedMessage := "failed to get nodes top: resource not allowed: metrics.k8s.io/v1beta1, Kind=NodeMetrics"
+ s.Equalf(expectedMessage, toolResult.Content[0].(mcp.TextContent).Text,
+ "expected descriptive error '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ })
+}
+
+func TestNodesTop(t *testing.T) {
+ suite.Run(t, new(NodesTopSuite))
+}
diff --git a/pkg/mcp/pods_exec_test.go b/pkg/mcp/pods_exec_test.go
index dac6883c..c39cc8d6 100644
--- a/pkg/mcp/pods_exec_test.go
+++ b/pkg/mcp/pods_exec_test.go
@@ -7,125 +7,132 @@ import (
"strings"
"testing"
+ "github.com/BurntSushi/toml"
"github.com/mark3labs/mcp-go/mcp"
+ "github.com/stretchr/testify/suite"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/containers/kubernetes-mcp-server/internal/test"
- "github.com/containers/kubernetes-mcp-server/pkg/config"
)
-func TestPodsExec(t *testing.T) {
- testCase(t, func(c *mcpContext) {
- mockServer := test.NewMockServer()
- defer mockServer.Close()
- c.withKubeConfig(mockServer.Config())
- mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
- if req.URL.Path != "/api/v1/namespaces/default/pods/pod-to-exec/exec" {
- return
- }
- var stdin, stdout bytes.Buffer
- ctx, err := test.CreateHTTPStreams(w, req, &test.StreamOptions{
- Stdin: &stdin,
- Stdout: &stdout,
- })
- if err != nil {
- w.WriteHeader(http.StatusInternalServerError)
- _, _ = w.Write([]byte(err.Error()))
- return
- }
- defer func(conn io.Closer) { _ = conn.Close() }(ctx.Closer)
- _, _ = io.WriteString(ctx.StdoutStream, "command:"+strings.Join(req.URL.Query()["command"], " ")+"\n")
- _, _ = io.WriteString(ctx.StdoutStream, "container:"+strings.Join(req.URL.Query()["container"], " ")+"\n")
- }))
- mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
- if req.URL.Path != "/api/v1/namespaces/default/pods/pod-to-exec" {
- return
- }
- test.WriteObject(w, &v1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: "default",
- Name: "pod-to-exec",
- },
- Spec: v1.PodSpec{Containers: []v1.Container{{Name: "container-to-exec"}}},
- })
- }))
- podsExecNilNamespace, err := c.callTool("pods_exec", map[string]interface{}{
+type PodsExecSuite struct {
+ BaseMcpSuite
+ mockServer *test.MockServer
+}
+
+func (s *PodsExecSuite) SetupTest() {
+ s.BaseMcpSuite.SetupTest()
+ s.mockServer = test.NewMockServer()
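+ // Point the suite's MCP server at the mock API server through a temporary kubeconfig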
+ s.Cfg.KubeConfig = s.mockServer.KubeconfigFile(s.T())
+}
+
+func (s *PodsExecSuite) TearDownTest() {
+ s.BaseMcpSuite.TearDownTest()
+ if s.mockServer != nil {
+ s.mockServer.Close()
+ }
+}
+
+func (s *PodsExecSuite) TestPodsExec() {
+ s.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ if req.URL.Path != "/api/v1/namespaces/default/pods/pod-to-exec/exec" {
+ return
+ }
+ var stdin, stdout bytes.Buffer
+ ctx, err := test.CreateHTTPStreams(w, req, &test.StreamOptions{
+ Stdin: &stdin,
+ Stdout: &stdout,
+ })
+ if err != nil {
+ w.WriteHeader(http.StatusInternalServerError)
+ _, _ = w.Write([]byte(err.Error()))
+ return
+ }
+ defer func(conn io.Closer) { _ = conn.Close() }(ctx.Closer)
+ _, _ = io.WriteString(ctx.StdoutStream, "command:"+strings.Join(req.URL.Query()["command"], " ")+"\n")
+ _, _ = io.WriteString(ctx.StdoutStream, "container:"+strings.Join(req.URL.Query()["container"], " ")+"\n")
+ }))
+ s.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ if req.URL.Path != "/api/v1/namespaces/default/pods/pod-to-exec" {
+ return
+ }
+ test.WriteObject(w, &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "default",
+ Name: "pod-to-exec",
+ },
+ Spec: v1.PodSpec{Containers: []v1.Container{{Name: "container-to-exec"}}},
+ })
+ }))
+ s.InitMcpClient()
+
+ s.Run("pods_exec(name=pod-to-exec, namespace=nil, command=[ls -l]), uses configured namespace", func() {
+ result, err := s.CallTool("pods_exec", map[string]interface{}{
"name": "pod-to-exec",
"command": []interface{}{"ls", "-l"},
})
- t.Run("pods_exec with name and nil namespace returns command output", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- }
- if podsExecNilNamespace.IsError {
- t.Fatalf("call tool failed: %v", podsExecNilNamespace.Content)
- }
- if !strings.Contains(podsExecNilNamespace.Content[0].(mcp.TextContent).Text, "command:ls -l\n") {
- t.Errorf("unexpected result %v", podsExecNilNamespace.Content[0].(mcp.TextContent).Text)
- }
+ s.Require().NotNil(result)
+ s.Run("returns command output", func() {
+ s.NoError(err, "call tool failed %v", err)
+ s.Falsef(result.IsError, "call tool failed: %v", result.Content)
+ s.Contains(result.Content[0].(mcp.TextContent).Text, "command:ls -l\n", "unexpected result %v", result.Content[0].(mcp.TextContent).Text)
})
- podsExecInNamespace, err := c.callTool("pods_exec", map[string]interface{}{
+ })
+ s.Run("pods_exec(name=pod-to-exec, namespace=default, command=[ls -l])", func() {
+ result, err := s.CallTool("pods_exec", map[string]interface{}{
"namespace": "default",
"name": "pod-to-exec",
"command": []interface{}{"ls", "-l"},
})
- t.Run("pods_exec with name and namespace returns command output", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- }
- if podsExecInNamespace.IsError {
- t.Fatalf("call tool failed: %v", podsExecInNamespace.Content)
- }
- if !strings.Contains(podsExecInNamespace.Content[0].(mcp.TextContent).Text, "command:ls -l\n") {
- t.Errorf("unexpected result %v", podsExecInNamespace.Content[0].(mcp.TextContent).Text)
- }
+ s.Require().NotNil(result)
+ s.Run("returns command output", func() {
+ s.NoError(err, "call tool failed %v", err)
+ s.Falsef(result.IsError, "call tool failed: %v", result.Content)
+ s.Contains(result.Content[0].(mcp.TextContent).Text, "command:ls -l\n", "unexpected result %v", result.Content[0].(mcp.TextContent).Text)
})
- podsExecInNamespaceAndContainer, err := c.callTool("pods_exec", map[string]interface{}{
+ })
+ s.Run("pods_exec(name=pod-to-exec, namespace=default, command=[ls -l], container=a-specific-container)", func() {
+ result, err := s.CallTool("pods_exec", map[string]interface{}{
"namespace": "default",
"name": "pod-to-exec",
"command": []interface{}{"ls", "-l"},
"container": "a-specific-container",
})
- t.Run("pods_exec with name, namespace, and container returns command output", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- }
- if podsExecInNamespaceAndContainer.IsError {
- t.Fatalf("call tool failed")
- }
- if !strings.Contains(podsExecInNamespaceAndContainer.Content[0].(mcp.TextContent).Text, "command:ls -l\n") {
- t.Errorf("unexpected result %v", podsExecInNamespaceAndContainer.Content[0].(mcp.TextContent).Text)
- }
- if !strings.Contains(podsExecInNamespaceAndContainer.Content[0].(mcp.TextContent).Text, "container:a-specific-container\n") {
- t.Errorf("expected container name not found %v", podsExecInNamespaceAndContainer.Content[0].(mcp.TextContent).Text)
- }
+ s.Require().NotNil(result)
+ s.Run("returns command output", func() {
+ s.NoError(err, "call tool failed %v", err)
+ s.Falsef(result.IsError, "call tool failed: %v", result.Content)
+ s.Contains(result.Content[0].(mcp.TextContent).Text, "command:ls -l\n", "unexpected result %v", result.Content[0].(mcp.TextContent).Text)
})
})
}
-func TestPodsExecDenied(t *testing.T) {
- deniedResourcesServer := test.Must(config.ReadToml([]byte(`
+func (s *PodsExecSuite) TestPodsExecDenied() {
+ s.Require().NoError(toml.Unmarshal([]byte(`
denied_resources = [ { version = "v1", kind = "Pod" } ]
- `)))
- testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) {
- c.withEnvTest()
- podsRun, _ := c.callTool("pods_exec", map[string]interface{}{
+ `), s.Cfg), "Expected to parse denied resources config")
+ s.InitMcpClient()
+ s.Run("pods_exec (denied)", func() {
+ toolResult, err := s.CallTool("pods_exec", map[string]interface{}{
"namespace": "default",
"name": "pod-to-exec",
"command": []interface{}{"ls", "-l"},
"container": "a-specific-container",
})
- t.Run("pods_exec has error", func(t *testing.T) {
- if !podsRun.IsError {
- t.Fatalf("call tool should fail")
- }
+ s.Require().NotNil(toolResult, "toolResult should not be nil")
+ s.Run("has error", func() {
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Nilf(err, "call tool should not return error object")
})
- t.Run("pods_exec describes denial", func(t *testing.T) {
+ s.Run("describes denial", func() {
expectedMessage := "failed to exec in pod pod-to-exec in namespace default: resource not allowed: /v1, Kind=Pod"
- if podsRun.Content[0].(mcp.TextContent).Text != expectedMessage {
- t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, podsRun.Content[0].(mcp.TextContent).Text)
- }
+ s.Equalf(expectedMessage, toolResult.Content[0].(mcp.TextContent).Text,
+ "expected descriptive error '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text)
})
})
}
+
+func TestPodsExec(t *testing.T) {
+ suite.Run(t, new(PodsExecSuite))
+}
diff --git a/pkg/mcp/pods_run_test.go b/pkg/mcp/pods_run_test.go
new file mode 100644
index 00000000..4c329f3e
--- /dev/null
+++ b/pkg/mcp/pods_run_test.go
@@ -0,0 +1,145 @@
+package mcp
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/BurntSushi/toml"
+ "github.com/mark3labs/mcp-go/mcp"
+ "github.com/stretchr/testify/suite"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "sigs.k8s.io/yaml"
+)
+
+type PodsRunSuite struct {
+ BaseMcpSuite
+}
+
+func (s *PodsRunSuite) TestPodsRun() {
+ s.InitMcpClient()
+ s.Run("pods_run with nil image returns error", func() {
+ toolResult, _ := s.CallTool("pods_run", map[string]interface{}{})
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Equalf("failed to run pod, missing argument image", toolResult.Content[0].(mcp.TextContent).Text,
+ "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ s.Run("pods_run(image=nginx, namespace=nil), uses configured namespace", func() {
+ podsRunNilNamespace, err := s.CallTool("pods_run", map[string]interface{}{"image": "nginx"})
+ s.Run("no error", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(podsRunNilNamespace.IsError, "call tool failed")
+ })
+ var decodedNilNamespace []unstructured.Unstructured
+ err = yaml.Unmarshal([]byte(podsRunNilNamespace.Content[0].(mcp.TextContent).Text), &decodedNilNamespace)
+ s.Run("has yaml content", func() {
+ s.Nilf(err, "invalid tool result content %v", err)
+ })
+ s.Run("returns 1 item (Pod)", func() {
+ s.Lenf(decodedNilNamespace, 1, "invalid pods count, expected 1, got %v", len(decodedNilNamespace))
+ s.Equalf("Pod", decodedNilNamespace[0].GetKind(), "invalid pod kind, expected Pod, got %v", decodedNilNamespace[0].GetKind())
+ })
+ s.Run("returns pod in default", func() {
+ s.Equalf("default", decodedNilNamespace[0].GetNamespace(), "invalid pod namespace, expected default, got %v", decodedNilNamespace[0].GetNamespace())
+ })
+ s.Run("returns pod with random name", func() {
+ s.Truef(strings.HasPrefix(decodedNilNamespace[0].GetName(), "kubernetes-mcp-server-run-"),
+ "invalid pod name, expected random, got %v", decodedNilNamespace[0].GetName())
+ })
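+ // the labels double as ownership markers that pods_delete uses to clean up companion resources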
+ s.Run("returns pod with labels", func() {
+ labels := decodedNilNamespace[0].Object["metadata"].(map[string]interface{})["labels"].(map[string]interface{})
+ s.NotEqualf("", labels["app.kubernetes.io/name"], "invalid labels, expected app.kubernetes.io/name, got %v", labels)
+ s.NotEqualf("", labels["app.kubernetes.io/component"], "invalid labels, expected app.kubernetes.io/component, got %v", labels)
+ s.Equalf("kubernetes-mcp-server", labels["app.kubernetes.io/managed-by"], "invalid labels, expected app.kubernetes.io/managed-by, got %v", labels)
+ s.Equalf("kubernetes-mcp-server-run-sandbox", labels["app.kubernetes.io/part-of"], "invalid labels, expected app.kubernetes.io/part-of, got %v", labels)
+ })
+ s.Run("returns pod with nginx container", func() {
+ containers := decodedNilNamespace[0].Object["spec"].(map[string]interface{})["containers"].([]interface{})
+ s.Equalf("nginx", containers[0].(map[string]interface{})["image"], "invalid container name, expected nginx, got %v", containers[0].(map[string]interface{})["image"])
+ })
+ })
+ s.Run("pods_run(image=nginx, namespace=nil, port=80)", func() {
+ podsRunNamespaceAndPort, err := s.CallTool("pods_run", map[string]interface{}{"image": "nginx", "port": 80})
+ s.Run("no error", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(podsRunNamespaceAndPort.IsError, "call tool failed")
+ })
+ var decodedNamespaceAndPort []unstructured.Unstructured
+ err = yaml.Unmarshal([]byte(podsRunNamespaceAndPort.Content[0].(mcp.TextContent).Text), &decodedNamespaceAndPort)
+ s.Run("has yaml content", func() {
+ s.Nilf(err, "invalid tool result content %v", err)
+ })
+ s.Run("returns 2 items (Pod + Service)", func() {
+ s.Lenf(decodedNamespaceAndPort, 2, "invalid pods count, expected 2, got %v", len(decodedNamespaceAndPort))
+ s.Equalf("Pod", decodedNamespaceAndPort[0].GetKind(), "invalid pod kind, expected Pod, got %v", decodedNamespaceAndPort[0].GetKind())
+ s.Equalf("Service", decodedNamespaceAndPort[1].GetKind(), "invalid service kind, expected Service, got %v", decodedNamespaceAndPort[1].GetKind())
+ })
+ s.Run("returns pod with port", func() {
+ containers := decodedNamespaceAndPort[0].Object["spec"].(map[string]interface{})["containers"].([]interface{})
+ ports := containers[0].(map[string]interface{})["ports"].([]interface{})
+ s.Equalf(int64(80), ports[0].(map[string]interface{})["containerPort"], "invalid container port, expected 80, got %v", ports[0].(map[string]interface{})["containerPort"])
+ })
+ s.Run("returns service with port and selector", func() {
+ ports := decodedNamespaceAndPort[1].Object["spec"].(map[string]interface{})["ports"].([]interface{})
+ s.Equalf(int64(80), ports[0].(map[string]interface{})["port"], "invalid service port, expected 80, got %v", ports[0].(map[string]interface{})["port"])
+ s.Equalf(int64(80), ports[0].(map[string]interface{})["targetPort"], "invalid service target port, expected 80, got %v", ports[0].(map[string]interface{})["targetPort"])
+ selector := decodedNamespaceAndPort[1].Object["spec"].(map[string]interface{})["selector"].(map[string]interface{})
+ s.NotEqualf("", selector["app.kubernetes.io/name"], "invalid service selector, expected app.kubernetes.io/name, got %v", selector)
+ s.Equalf("kubernetes-mcp-server", selector["app.kubernetes.io/managed-by"], "invalid service selector, expected app.kubernetes.io/managed-by, got %v", selector)
+ s.Equalf("kubernetes-mcp-server-run-sandbox", selector["app.kubernetes.io/part-of"], "invalid service selector, expected app.kubernetes.io/part-of, got %v", selector)
+ })
+ })
+}
+
+func (s *PodsRunSuite) TestPodsRunDenied() {
+ s.Require().NoError(toml.Unmarshal([]byte(`
+ denied_resources = [ { version = "v1", kind = "Pod" } ]
+ `), s.Cfg), "Expected to parse denied resources config")
+ s.InitMcpClient()
+ s.Run("pods_run (denied)", func() {
+ podsRun, err := s.CallTool("pods_run", map[string]interface{}{"image": "nginx"})
+ s.Run("has error", func() {
+ s.Truef(podsRun.IsError, "call tool should fail")
+ s.Nilf(err, "call tool should not return error object")
+ })
+ s.Run("describes denial", func() {
+ expectedMessage := "failed to run pod in namespace : resource not allowed: /v1, Kind=Pod"
+ s.Equalf(expectedMessage, podsRun.Content[0].(mcp.TextContent).Text,
+ "expected descriptive error '%s', got %v", expectedMessage, podsRun.Content[0].(mcp.TextContent).Text)
+ })
+ })
+}
+
+func (s *PodsRunSuite) TestPodsRunInOpenShift() {
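+ // Masquerade the envtest cluster as OpenShift so pods_run also emits a Route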
+ s.Require().NoError(EnvTestInOpenShift(s.T().Context()), "Expected to configure test for OpenShift")
+ s.T().Cleanup(func() {
+ s.Require().NoError(EnvTestInOpenShiftClear(s.T().Context()), "Expected to clear OpenShift test configuration")
+ })
+ s.InitMcpClient()
+
+ s.Run("pods_run(image=nginx, namespace=nil, port=80) returns route with port", func() {
+ podsRunInOpenShift, err := s.CallTool("pods_run", map[string]interface{}{"image": "nginx", "port": 80})
+ s.Run("no error", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(podsRunInOpenShift.IsError, "call tool failed")
+ })
+ var decodedPodServiceRoute []unstructured.Unstructured
+ err = yaml.Unmarshal([]byte(podsRunInOpenShift.Content[0].(mcp.TextContent).Text), &decodedPodServiceRoute)
+ s.Run("has yaml content", func() {
+ s.Nilf(err, "invalid tool result content %v", err)
+ })
+ s.Run("returns 3 items (Pod + Service + Route)", func() {
+ s.Lenf(decodedPodServiceRoute, 3, "invalid pods count, expected 3, got %v", len(decodedPodServiceRoute))
+ s.Equalf("Pod", decodedPodServiceRoute[0].GetKind(), "invalid pod kind, expected Pod, got %v", decodedPodServiceRoute[0].GetKind())
+ s.Equalf("Service", decodedPodServiceRoute[1].GetKind(), "invalid service kind, expected Service, got %v", decodedPodServiceRoute[1].GetKind())
+ s.Equalf("Route", decodedPodServiceRoute[2].GetKind(), "invalid route kind, expected Route, got %v", decodedPodServiceRoute[2].GetKind())
+ })
+ s.Run("returns route with port", func() {
+ targetPort := decodedPodServiceRoute[2].Object["spec"].(map[string]interface{})["port"].(map[string]interface{})["targetPort"].(int64)
+ s.Equalf(int64(80), targetPort, "invalid route target port, expected 80, got %v", targetPort)
+ })
+ })
+}
+
+func TestPodsRun(t *testing.T) {
+ suite.Run(t, new(PodsRunSuite))
+}
diff --git a/pkg/mcp/pods_test.go b/pkg/mcp/pods_test.go
index cfa20dcb..ddeec3ea 100644
--- a/pkg/mcp/pods_test.go
+++ b/pkg/mcp/pods_test.go
@@ -5,9 +5,8 @@ import (
"strings"
"testing"
- "github.com/containers/kubernetes-mcp-server/internal/test"
- "github.com/containers/kubernetes-mcp-server/pkg/config"
- "github.com/containers/kubernetes-mcp-server/pkg/output"
+ "github.com/BurntSushi/toml"
+ "github.com/stretchr/testify/suite"
"github.com/mark3labs/mcp-go/mcp"
corev1 "k8s.io/api/core/v1"
@@ -16,228 +15,194 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
+ "k8s.io/client-go/kubernetes"
"sigs.k8s.io/yaml"
)
-func TestPodsListInAllNamespaces(t *testing.T) {
- testCase(t, func(c *mcpContext) {
- c.withEnvTest()
- toolResult, err := c.callTool("pods_list", map[string]interface{}{})
- t.Run("pods_list returns pods list", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- }
- if toolResult.IsError {
- t.Fatalf("call tool failed")
- }
+type PodsSuite struct {
+ BaseMcpSuite
+}
+
+func (s *PodsSuite) TestPodsListInAllNamespaces() {
+ s.InitMcpClient()
+ s.Run("pods_list returns pods list in all namespaces", func() {
+ toolResult, err := s.CallTool("pods_list", map[string]interface{}{})
+ s.Run("no error", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(toolResult.IsError, "call tool failed")
})
var decoded []unstructured.Unstructured
err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded)
- t.Run("pods_list has yaml content", func(t *testing.T) {
- if err != nil {
- t.Fatalf("invalid tool result content %v", err)
- }
+ s.Run("has yaml content", func() {
+ s.Nilf(err, "invalid tool result content %v", err)
})
- t.Run("pods_list returns 3 items", func(t *testing.T) {
- if len(decoded) != 3 {
- t.Fatalf("invalid pods count, expected 3, got %v", len(decoded))
- }
+ s.Run("returns at least 3 items", func() {
+ s.GreaterOrEqualf(len(decoded), 3, "invalid pods count, expected at least 3, got %v", len(decoded))
})
- t.Run("pods_list returns pod in ns-1", func(t *testing.T) {
- if decoded[1].GetName() != "a-pod-in-ns-1" {
- t.Fatalf("invalid pod name, expected a-pod-in-ns-1, got %v", decoded[1].GetName())
- }
- if decoded[1].GetNamespace() != "ns-1" {
- t.Fatalf("invalid pod namespace, expected ns-1, got %v", decoded[1].GetNamespace())
+ var aPodInNs1, aPodInNs2 *unstructured.Unstructured
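+ // pick the fixture pods out by name: other suites may have created additional pods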
+ for _, pod := range decoded {
+ switch pod.GetName() {
+ case "a-pod-in-ns-1":
+ aPodInNs1 = &pod
+ case "a-pod-in-ns-2":
+ aPodInNs2 = &pod
}
+ }
+ s.Run("returns pod in ns-1", func() {
+ s.Require().NotNil(aPodInNs1, "aPodInNs1 is nil")
+ s.Equalf("a-pod-in-ns-1", aPodInNs1.GetName(), "invalid pod name, expected a-pod-in-ns-1, got %v", aPodInNs1.GetName())
+ s.Equalf("ns-1", aPodInNs1.GetNamespace(), "invalid pod namespace, expected ns-1, got %v", aPodInNs1.GetNamespace())
})
- t.Run("pods_list returns pod in ns-2", func(t *testing.T) {
- if decoded[2].GetName() != "a-pod-in-ns-2" {
- t.Fatalf("invalid pod name, expected a-pod-in-ns-2, got %v", decoded[2].GetName())
- }
- if decoded[2].GetNamespace() != "ns-2" {
- t.Fatalf("invalid pod namespace, expected ns-2, got %v", decoded[2].GetNamespace())
- }
+ s.Run("returns pod in ns-2", func() {
+ s.Require().NotNil(aPodInNs2, "aPodInNs2 is nil")
+ s.Equalf("a-pod-in-ns-2", aPodInNs2.GetName(), "invalid pod name, expected a-pod-in-ns-2, got %v", aPodInNs2.GetName())
+ s.Equalf("ns-2", aPodInNs2.GetNamespace(), "invalid pod namespace, expected ns-2, got %v", aPodInNs2.GetNamespace())
})
- t.Run("pods_list omits managed fields", func(t *testing.T) {
- if decoded[1].GetManagedFields() != nil {
- t.Fatalf("managed fields should be omitted, got %v", decoded[0].GetManagedFields())
- }
+ s.Run("omits managed fields", func() {
+ s.Nilf(decoded[1].GetManagedFields(), "managed fields should be omitted, got %v", decoded[1].GetManagedFields())
})
})
}
-func TestPodsListInAllNamespacesUnauthorized(t *testing.T) {
- testCase(t, func(c *mcpContext) {
- c.withEnvTest()
- defer restoreAuth(c.ctx)
- client := c.newKubernetesClient()
- // Authorize user only for default/configured namespace
- r, _ := client.RbacV1().Roles("default").Create(c.ctx, &rbacv1.Role{
- ObjectMeta: metav1.ObjectMeta{Name: "allow-pods-list"},
- Rules: []rbacv1.PolicyRule{{
- Verbs: []string{"get", "list"},
- APIGroups: []string{""},
- Resources: []string{"pods"},
- }},
- }, metav1.CreateOptions{})
- _, _ = client.RbacV1().RoleBindings("default").Create(c.ctx, &rbacv1.RoleBinding{
- ObjectMeta: metav1.ObjectMeta{Name: "allow-pods-list"},
- Subjects: []rbacv1.Subject{{Kind: "User", Name: envTestUser.Name}},
- RoleRef: rbacv1.RoleRef{Kind: "Role", Name: r.Name},
- }, metav1.CreateOptions{})
- // Deny cluster by removing cluster rule
- _ = client.RbacV1().ClusterRoles().Delete(c.ctx, "allow-all", metav1.DeleteOptions{})
- toolResult, err := c.callTool("pods_list", map[string]interface{}{})
- t.Run("pods_list returns pods list for default namespace only", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- return
- }
- if toolResult.IsError {
- t.Fatalf("call tool failed %v", toolResult.Content)
- return
- }
+func (s *PodsSuite) TestPodsListInAllNamespacesUnauthorized() {
+ s.InitMcpClient()
+ defer restoreAuth(s.T().Context())
+ client := kubernetes.NewForConfigOrDie(envTestRestConfig)
+ // Authorize user only for default/configured namespace
+ r, _ := client.RbacV1().Roles("default").Create(s.T().Context(), &rbacv1.Role{
+ ObjectMeta: metav1.ObjectMeta{Name: "allow-pods-list"},
+ Rules: []rbacv1.PolicyRule{{
+ Verbs: []string{"get", "list"},
+ APIGroups: []string{""},
+ Resources: []string{"pods"},
+ }},
+ }, metav1.CreateOptions{})
+ _, _ = client.RbacV1().RoleBindings("default").Create(s.T().Context(), &rbacv1.RoleBinding{
+ ObjectMeta: metav1.ObjectMeta{Name: "allow-pods-list"},
+ Subjects: []rbacv1.Subject{{Kind: "User", Name: envTestUser.Name}},
+ RoleRef: rbacv1.RoleRef{Kind: "Role", Name: r.Name},
+ }, metav1.CreateOptions{})
+ // Deny cluster by removing cluster rule
+ _ = client.RbacV1().ClusterRoles().Delete(s.T().Context(), "allow-all", metav1.DeleteOptions{})
+ s.Run("pods_list returns pods list for default namespace only", func() {
+ toolResult, err := s.CallTool("pods_list", map[string]interface{}{})
+ s.Run("no error", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(toolResult.IsError, "call tool failed %v", toolResult.Content)
})
var decoded []unstructured.Unstructured
err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded)
- t.Run("pods_list has yaml content", func(t *testing.T) {
- if err != nil {
- t.Fatalf("invalid tool result content %v", err)
- return
- }
+ s.Run("has yaml content", func() {
+ s.Nilf(err, "invalid tool result content %v", err)
})
- t.Run("pods_list returns 1 items", func(t *testing.T) {
- if len(decoded) != 1 {
- t.Fatalf("invalid pods count, expected 1, got %v", len(decoded))
- return
- }
+ s.Run("returns at least 1 item", func() {
+ s.GreaterOrEqualf(len(decoded), 1, "invalid pods count, expected at least 1, got %v", len(decoded))
})
- t.Run("pods_list returns pod in default", func(t *testing.T) {
- if decoded[0].GetName() != "a-pod-in-default" {
- t.Fatalf("invalid pod name, expected a-pod-in-default, got %v", decoded[0].GetName())
- return
+ s.Run("all pods are in default namespace", func() {
+ for _, pod := range decoded {
+ s.Equalf("default", pod.GetNamespace(), "all pods should be in default namespace, got pod %s in namespace %s", pod.GetName(), pod.GetNamespace())
}
- if decoded[0].GetNamespace() != "default" {
- t.Fatalf("invalid pod namespace, expected default, got %v", decoded[0].GetNamespace())
- return
+ })
+ s.Run("includes a-pod-in-default", func() {
+ found := false
+ for _, pod := range decoded {
+ if pod.GetName() == "a-pod-in-default" {
+ found = true
+ break
+ }
}
+ s.Truef(found, "expected to find pod a-pod-in-default")
})
})
}
-func TestPodsListInNamespace(t *testing.T) {
- testCase(t, func(c *mcpContext) {
- c.withEnvTest()
- t.Run("pods_list_in_namespace with nil namespace returns error", func(t *testing.T) {
- toolResult, _ := c.callTool("pods_list_in_namespace", map[string]interface{}{})
- if toolResult.IsError != true {
- t.Fatalf("call tool should fail")
- return
- }
- if toolResult.Content[0].(mcp.TextContent).Text != "failed to list pods in namespace, missing argument namespace" {
- t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
- return
- }
- })
- toolResult, err := c.callTool("pods_list_in_namespace", map[string]interface{}{
+func (s *PodsSuite) TestPodsListInNamespace() {
+ s.InitMcpClient()
+ s.Run("pods_list_in_namespace with nil namespace returns error", func() {
+ toolResult, _ := s.CallTool("pods_list_in_namespace", map[string]interface{}{})
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Equalf("failed to list pods in namespace, missing argument namespace", toolResult.Content[0].(mcp.TextContent).Text,
+ "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ s.Run("pods_list_in_namespace(namespace=ns-1) returns pods list", func() {
+ toolResult, err := s.CallTool("pods_list_in_namespace", map[string]interface{}{
"namespace": "ns-1",
})
- t.Run("pods_list_in_namespace returns pods list", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- }
- if toolResult.IsError {
- t.Fatalf("call tool failed")
- }
+ s.Run("no error", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(toolResult.IsError, "call tool failed")
})
var decoded []unstructured.Unstructured
err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded)
- t.Run("pods_list_in_namespace has yaml content", func(t *testing.T) {
- if err != nil {
- t.Fatalf("invalid tool result content %v", err)
- }
+ s.Run("has yaml content", func() {
+ s.Nilf(err, "invalid tool result content %v", err)
})
- t.Run("pods_list_in_namespace returns 1 items", func(t *testing.T) {
- if len(decoded) != 1 {
- t.Fatalf("invalid pods count, expected 1, got %v", len(decoded))
- }
+ s.Run("returns 1 item", func() {
+ s.Lenf(decoded, 1, "invalid pods count, expected 1, got %v", len(decoded))
})
- t.Run("pods_list_in_namespace returns pod in ns-1", func(t *testing.T) {
- if decoded[0].GetName() != "a-pod-in-ns-1" {
- t.Errorf("invalid pod name, expected a-pod-in-ns-1, got %v", decoded[0].GetName())
- }
- if decoded[0].GetNamespace() != "ns-1" {
- t.Errorf("invalid pod namespace, expected ns-1, got %v", decoded[0].GetNamespace())
- }
+ s.Run("returns pod in ns-1", func() {
+ s.Equalf("a-pod-in-ns-1", decoded[0].GetName(), "invalid pod name, expected a-pod-in-ns-1, got %v", decoded[0].GetName())
+ s.Equalf("ns-1", decoded[0].GetNamespace(), "invalid pod namespace, expected ns-1, got %v", decoded[0].GetNamespace())
})
- t.Run("pods_list_in_namespace omits managed fields", func(t *testing.T) {
- if decoded[0].GetManagedFields() != nil {
- t.Fatalf("managed fields should be omitted, got %v", decoded[0].GetManagedFields())
- }
+ s.Run("omits managed fields", func() {
+ s.Nilf(decoded[0].GetManagedFields(), "managed fields should be omitted, got %v", decoded[0].GetManagedFields())
})
})
}
-func TestPodsListDenied(t *testing.T) {
- deniedResourcesServer := test.Must(config.ReadToml([]byte(`
+func (s *PodsSuite) TestPodsListDenied() {
+ s.Require().NoError(toml.Unmarshal([]byte(`
denied_resources = [ { version = "v1", kind = "Pod" } ]
- `)))
- testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) {
- c.withEnvTest()
- podsList, _ := c.callTool("pods_list", map[string]interface{}{})
- t.Run("pods_list has error", func(t *testing.T) {
- if !podsList.IsError {
- t.Fatalf("call tool should fail")
- }
- })
- t.Run("pods_list describes denial", func(t *testing.T) {
+ `), s.Cfg), "Expected to parse denied resources config")
+ s.InitMcpClient()
+ s.Run("pods_list (denied)", func() {
+ podsList, err := s.CallTool("pods_list", map[string]interface{}{})
+ s.Run("has error", func() {
+ s.Truef(podsList.IsError, "call tool should fail")
+ s.Nilf(err, "call tool should not return error object")
+ })
+ s.Run("describes denial", func() {
expectedMessage := "failed to list pods in all namespaces: resource not allowed: /v1, Kind=Pod"
- if podsList.Content[0].(mcp.TextContent).Text != expectedMessage {
- t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, podsList.Content[0].(mcp.TextContent).Text)
- }
+ s.Equalf(expectedMessage, podsList.Content[0].(mcp.TextContent).Text,
+ "expected descriptive error '%s', got %v", expectedMessage, podsList.Content[0].(mcp.TextContent).Text)
})
- podsListInNamespace, _ := c.callTool("pods_list_in_namespace", map[string]interface{}{"namespace": "ns-1"})
- t.Run("pods_list_in_namespace has error", func(t *testing.T) {
- if !podsListInNamespace.IsError {
- t.Fatalf("call tool should fail")
- }
+ })
+ s.Run("pods_list_in_namespace (denied)", func() {
+ podsListInNamespace, err := s.CallTool("pods_list_in_namespace", map[string]interface{}{"namespace": "ns-1"})
+ s.Run("has error", func() {
+ s.Truef(podsListInNamespace.IsError, "call tool should fail")
+ s.Nilf(err, "call tool should not return error object")
})
- t.Run("pods_list_in_namespace describes denial", func(t *testing.T) {
+ s.Run("describes denial", func() {
expectedMessage := "failed to list pods in namespace ns-1: resource not allowed: /v1, Kind=Pod"
- if podsListInNamespace.Content[0].(mcp.TextContent).Text != expectedMessage {
- t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, podsListInNamespace.Content[0].(mcp.TextContent).Text)
- }
+ s.Equalf(expectedMessage, podsListInNamespace.Content[0].(mcp.TextContent).Text,
+ "expected descriptive error '%s', got %v", expectedMessage, podsListInNamespace.Content[0].(mcp.TextContent).Text)
})
})
}
-func TestPodsListAsTable(t *testing.T) {
- testCaseWithContext(t, &mcpContext{listOutput: output.Table}, func(c *mcpContext) {
- c.withEnvTest()
- podsList, err := c.callTool("pods_list", map[string]interface{}{})
- t.Run("pods_list returns pods list", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- }
- if podsList.IsError {
- t.Fatalf("call tool failed")
- }
- })
+func (s *PodsSuite) TestPodsListAsTable() {
+ s.Cfg.ListOutput = "table"
+ s.InitMcpClient()
+ s.Run("pods_list (list_output=table)", func() {
+ podsList, err := s.CallTool("pods_list", map[string]interface{}{})
+ s.Run("no error", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(podsList.IsError, "call tool failed")
+ })
+ s.Require().NotNil(podsList, "Expected tool result from call")
outPodsList := podsList.Content[0].(mcp.TextContent).Text
- t.Run("pods_list returns table with 1 header and 3 rows", func(t *testing.T) {
+ s.Run("returns table with header and rows", func() {
lines := strings.Count(outPodsList, "\n")
- if lines != 4 {
- t.Fatalf("invalid line count, expected 4 (1 header, 3 row), got %v", lines)
- }
+ s.GreaterOrEqualf(lines, 3, "invalid line count, expected at least 3 (1 header, 2+ rows), got %v", lines)
})
- t.Run("pods_list_in_namespace returns column headers", func(t *testing.T) {
+ s.Run("returns column headers", func() {
expectedHeaders := "NAMESPACE\\s+APIVERSION\\s+KIND\\s+NAME\\s+READY\\s+STATUS\\s+RESTARTS\\s+AGE\\s+IP\\s+NODE\\s+NOMINATED NODE\\s+READINESS GATES\\s+LABELS"
- if m, e := regexp.MatchString(expectedHeaders, outPodsList); !m || e != nil {
- t.Fatalf("Expected headers '%s' not found in output:\n%s", expectedHeaders, outPodsList)
- }
+ m, e := regexp.MatchString(expectedHeaders, outPodsList)
+ s.Truef(m, "Expected headers '%s' not found in output:\n%s", expectedHeaders, outPodsList)
+ s.NoErrorf(e, "Error matching headers regex: %v", e)
})
- t.Run("pods_list_in_namespace returns formatted row for a-pod-in-ns-1", func(t *testing.T) {
+ s.Run("returns formatted row for a-pod-in-ns-1", func() {
expectedRow := "(?ns-1)\\s+" +
"(?v1)\\s+" +
"(?Pod)\\s+" +
@@ -251,11 +216,11 @@ func TestPodsListAsTable(t *testing.T) {
"(?)\\s+" +
"(?)\\s+" +
"(?)"
- if m, e := regexp.MatchString(expectedRow, outPodsList); !m || e != nil {
- t.Fatalf("Expected row '%s' not found in output:\n%s", expectedRow, outPodsList)
- }
+ m, e := regexp.MatchString(expectedRow, outPodsList)
+ s.Truef(m, "Expected row '%s' not found in output:\n%s", expectedRow, outPodsList)
+ s.NoErrorf(e, "Error matching a-pod-in-ns-1 regex: %v", e)
})
- t.Run("pods_list_in_namespace returns formatted row for a-pod-in-default", func(t *testing.T) {
+ s.Run("returns formatted row for a-pod-in-default", func() {
expectedRow := "(?default)\\s+" +
"(?v1)\\s+" +
"(?Pod)\\s+" +
@@ -269,36 +234,32 @@ func TestPodsListAsTable(t *testing.T) {
"(?)\\s+" +
"(?)\\s+" +
"(?app=nginx)"
- if m, e := regexp.MatchString(expectedRow, outPodsList); !m || e != nil {
- t.Fatalf("Expected row '%s' not found in output:\n%s", expectedRow, outPodsList)
- }
+ m, e := regexp.MatchString(expectedRow, outPodsList)
+ s.Truef(m, "Expected row '%s' not found in output:\n%s", expectedRow, outPodsList)
+ s.NoErrorf(e, "Error matching a-pod-in-default regex: %v", e)
})
- podsListInNamespace, err := c.callTool("pods_list_in_namespace", map[string]interface{}{
+ })
+ s.Run("pods_list_in_namespace (list_output=table)", func() {
+ podsListInNamespace, err := s.CallTool("pods_list_in_namespace", map[string]interface{}{
"namespace": "ns-1",
})
- t.Run("pods_list_in_namespace returns pods list", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- return
- }
- if podsListInNamespace.IsError {
- t.Fatalf("call tool failed")
- }
+ s.Run("no error", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(podsListInNamespace.IsError, "call tool failed")
})
+ s.Require().NotNil(podsListInNamespace, "Expected tool result from call")
outPodsListInNamespace := podsListInNamespace.Content[0].(mcp.TextContent).Text
- t.Run("pods_list_in_namespace returns table with 1 header and 1 row", func(t *testing.T) {
+ s.Run("returns table with header and row", func() {
lines := strings.Count(outPodsListInNamespace, "\n")
- if lines != 2 {
- t.Fatalf("invalid line count, expected 2 (1 header, 1 row), got %v", lines)
- }
+ s.GreaterOrEqualf(lines, 1, "invalid line count, expected at least 1 (1 header, 1+ rows), got %v", lines)
})
- t.Run("pods_list_in_namespace returns column headers", func(t *testing.T) {
+ s.Run("returns column headers", func() {
expectedHeaders := "NAMESPACE\\s+APIVERSION\\s+KIND\\s+NAME\\s+READY\\s+STATUS\\s+RESTARTS\\s+AGE\\s+IP\\s+NODE\\s+NOMINATED NODE\\s+READINESS GATES\\s+LABELS"
- if m, e := regexp.MatchString(expectedHeaders, outPodsListInNamespace); !m || e != nil {
- t.Fatalf("Expected headers '%s' not found in output:\n%s", expectedHeaders, outPodsListInNamespace)
- }
+ m, e := regexp.MatchString(expectedHeaders, outPodsListInNamespace)
+ s.Truef(m, "Expected headers '%s' not found in output:\n%s", expectedHeaders, outPodsListInNamespace)
+ s.NoErrorf(e, "Error matching headers regex: %v", e)
})
- t.Run("pods_list_in_namespace returns formatted row", func(t *testing.T) {
+ s.Run("returns formatted row", func() {
expectedRow := "(?ns-1)\\s+" +
"(?v1)\\s+" +
"(?Pod)\\s+" +
@@ -312,297 +273,207 @@ func TestPodsListAsTable(t *testing.T) {
"(?)\\s+" +
"(?)\\s+" +
"(?)"
- if m, e := regexp.MatchString(expectedRow, outPodsListInNamespace); !m || e != nil {
- t.Fatalf("Expected row '%s' not found in output:\n%s", expectedRow, outPodsListInNamespace)
- }
+ m, e := regexp.MatchString(expectedRow, outPodsListInNamespace)
+ s.Truef(m, "Expected row '%s' not found in output:\n%s", expectedRow, outPodsListInNamespace)
+ s.NoErrorf(e, "Error matching formatted row regex: %v", e)
})
})
}
-func TestPodsGet(t *testing.T) {
- testCase(t, func(c *mcpContext) {
- c.withEnvTest()
- t.Run("pods_get with nil name returns error", func(t *testing.T) {
- toolResult, _ := c.callTool("pods_get", map[string]interface{}{})
- if toolResult.IsError != true {
- t.Fatalf("call tool should fail")
- return
- }
- if toolResult.Content[0].(mcp.TextContent).Text != "failed to get pod, missing argument name" {
- t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
- return
- }
- })
- t.Run("pods_get with not found name returns error", func(t *testing.T) {
- toolResult, _ := c.callTool("pods_get", map[string]interface{}{"name": "not-found"})
- if toolResult.IsError != true {
- t.Fatalf("call tool should fail")
- return
- }
- if toolResult.Content[0].(mcp.TextContent).Text != "failed to get pod not-found in namespace : pods \"not-found\" not found" {
- t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
- return
- }
- })
- podsGetNilNamespace, err := c.callTool("pods_get", map[string]interface{}{
+func (s *PodsSuite) TestPodsGet() {
+ s.InitMcpClient()
+ s.Run("pods_get with nil name returns error", func() {
+ toolResult, _ := s.CallTool("pods_get", map[string]interface{}{})
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Equalf("failed to get pod, missing argument name", toolResult.Content[0].(mcp.TextContent).Text, "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ s.Run("pods_get(name=not-found) with not found name returns error", func() {
+ toolResult, _ := s.CallTool("pods_get", map[string]interface{}{"name": "not-found"})
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Equalf("failed to get pod not-found in namespace : pods \"not-found\" not found", toolResult.Content[0].(mcp.TextContent).Text, "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ s.Run("pods_get(name=a-pod-in-default, namespace=nil), uses configured namespace", func() {
+ podsGetNilNamespace, err := s.CallTool("pods_get", map[string]interface{}{
"name": "a-pod-in-default",
})
- t.Run("pods_get with name and nil namespace returns pod", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- return
- }
- if podsGetNilNamespace.IsError {
- t.Fatalf("call tool failed")
- return
- }
+ s.Run("returns pod", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(podsGetNilNamespace.IsError, "call tool failed")
})
var decodedNilNamespace unstructured.Unstructured
err = yaml.Unmarshal([]byte(podsGetNilNamespace.Content[0].(mcp.TextContent).Text), &decodedNilNamespace)
- t.Run("pods_get with name and nil namespace has yaml content", func(t *testing.T) {
- if err != nil {
- t.Fatalf("invalid tool result content %v", err)
- return
- }
+ s.Run("has yaml content", func() {
+ s.Nilf(err, "invalid tool result content %v", err)
})
- t.Run("pods_get with name and nil namespace returns pod in default", func(t *testing.T) {
- if decodedNilNamespace.GetName() != "a-pod-in-default" {
- t.Fatalf("invalid pod name, expected a-pod-in-default, got %v", decodedNilNamespace.GetName())
- return
- }
- if decodedNilNamespace.GetNamespace() != "default" {
- t.Fatalf("invalid pod namespace, expected default, got %v", decodedNilNamespace.GetNamespace())
- return
- }
+ s.Run("returns pod in default", func() {
+ s.Equalf("a-pod-in-default", decodedNilNamespace.GetName(), "invalid pod name, expected a-pod-in-default, got %v", decodedNilNamespace.GetName())
+ s.Equalf("default", decodedNilNamespace.GetNamespace(), "invalid pod namespace, expected default, got %v", decodedNilNamespace.GetNamespace())
})
- t.Run("pods_get with name and nil namespace omits managed fields", func(t *testing.T) {
- if decodedNilNamespace.GetManagedFields() != nil {
- t.Fatalf("managed fields should be omitted, got %v", decodedNilNamespace.GetManagedFields())
- return
- }
+ s.Run("omits managed fields", func() {
+ s.Nilf(decodedNilNamespace.GetManagedFields(), "managed fields should be omitted, got %v", decodedNilNamespace.GetManagedFields())
})
- podsGetInNamespace, err := c.callTool("pods_get", map[string]interface{}{
+ })
+ s.Run("pods_get(name=a-pod-in-default, namespace=ns-1)", func() {
+ podsGetInNamespace, err := s.CallTool("pods_get", map[string]interface{}{
"namespace": "ns-1",
"name": "a-pod-in-ns-1",
})
- t.Run("pods_get with name and namespace returns pod", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- return
- }
- if podsGetInNamespace.IsError {
- t.Fatalf("call tool failed")
- return
- }
+ s.Run("returns pod", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(podsGetInNamespace.IsError, "call tool failed")
})
var decodedInNamespace unstructured.Unstructured
err = yaml.Unmarshal([]byte(podsGetInNamespace.Content[0].(mcp.TextContent).Text), &decodedInNamespace)
- t.Run("pods_get with name and namespace has yaml content", func(t *testing.T) {
- if err != nil {
- t.Fatalf("invalid tool result content %v", err)
- return
- }
+ s.Run("has yaml content", func() {
+ s.Nilf(err, "invalid tool result content %v", err)
})
- t.Run("pods_get with name and namespace returns pod in ns-1", func(t *testing.T) {
- if decodedInNamespace.GetName() != "a-pod-in-ns-1" {
- t.Fatalf("invalid pod name, expected a-pod-in-ns-1, got %v", decodedInNamespace.GetName())
- return
- }
- if decodedInNamespace.GetNamespace() != "ns-1" {
- t.Fatalf("invalid pod namespace, ns-1 ns-1, got %v", decodedInNamespace.GetNamespace())
- return
- }
+ s.Run("returns pod in ns-1", func() {
+ s.Equalf("a-pod-in-ns-1", decodedInNamespace.GetName(), "invalid pod name, expected a-pod-in-ns-1, got %v", decodedInNamespace.GetName())
+ s.Equalf("ns-1", decodedInNamespace.GetNamespace(), "invalid pod namespace, expected ns-1, got %v", decodedInNamespace.GetNamespace())
})
})
}
-func TestPodsGetDenied(t *testing.T) {
- deniedResourcesServer := test.Must(config.ReadToml([]byte(`
+func (s *PodsSuite) TestPodsGetDenied() {
+ s.Require().NoError(toml.Unmarshal([]byte(`
denied_resources = [ { version = "v1", kind = "Pod" } ]
- `)))
- testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) {
- c.withEnvTest()
- podsGet, _ := c.callTool("pods_get", map[string]interface{}{"name": "a-pod-in-default"})
- t.Run("pods_get has error", func(t *testing.T) {
- if !podsGet.IsError {
- t.Fatalf("call tool should fail")
- }
- })
- t.Run("pods_get describes denial", func(t *testing.T) {
+ `), s.Cfg), "Expected to parse denied resources config")
+ s.InitMcpClient()
+ s.Run("pods_get (denied)", func() {
+ podsGet, err := s.CallTool("pods_get", map[string]interface{}{"name": "a-pod-in-default"})
+ s.Run("has error", func() {
+ s.Truef(podsGet.IsError, "call tool should fail")
+ s.Nilf(err, "call tool should not return error object")
+ })
+ s.Run("describes denial", func() {
expectedMessage := "failed to get pod a-pod-in-default in namespace : resource not allowed: /v1, Kind=Pod"
- if podsGet.Content[0].(mcp.TextContent).Text != expectedMessage {
- t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, podsGet.Content[0].(mcp.TextContent).Text)
- }
+ s.Equalf(expectedMessage, podsGet.Content[0].(mcp.TextContent).Text,
+ "expected descriptive error '%s', got %v", expectedMessage, podsGet.Content[0].(mcp.TextContent).Text)
})
})
}
-func TestPodsDelete(t *testing.T) {
- testCase(t, func(c *mcpContext) {
- c.withEnvTest()
- // Errors
- t.Run("pods_delete with nil name returns error", func(t *testing.T) {
- toolResult, _ := c.callTool("pods_delete", map[string]interface{}{})
- if toolResult.IsError != true {
- t.Errorf("call tool should fail")
- return
- }
- if toolResult.Content[0].(mcp.TextContent).Text != "failed to delete pod, missing argument name" {
- t.Errorf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
- return
- }
- })
- t.Run("pods_delete with not found name returns error", func(t *testing.T) {
- toolResult, _ := c.callTool("pods_delete", map[string]interface{}{"name": "not-found"})
- if toolResult.IsError != true {
- t.Errorf("call tool should fail")
- return
- }
- if toolResult.Content[0].(mcp.TextContent).Text != "failed to delete pod not-found in namespace : pods \"not-found\" not found" {
- t.Errorf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
- return
- }
- })
- // Default/nil Namespace
- kc := c.newKubernetesClient()
- _, _ = kc.CoreV1().Pods("default").Create(c.ctx, &corev1.Pod{
+func (s *PodsSuite) TestPodsDelete() {
+ s.InitMcpClient()
+ s.Run("pods_delete with nil name returns error", func() {
+ toolResult, _ := s.CallTool("pods_delete", map[string]interface{}{})
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Equalf("failed to delete pod, missing argument name", toolResult.Content[0].(mcp.TextContent).Text, "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ s.Run("pods_delete(name=not-found) with not found name returns error", func() {
+ toolResult, _ := s.CallTool("pods_delete", map[string]interface{}{"name": "not-found"})
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Equalf("failed to delete pod not-found in namespace : pods \"not-found\" not found", toolResult.Content[0].(mcp.TextContent).Text, "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ s.Run("pods_delete(name=a-pod-to-delete, namespace=nil), uses configured namespace", func() {
+ kc := kubernetes.NewForConfigOrDie(envTestRestConfig)
+ _, _ = kc.CoreV1().Pods("default").Create(s.T().Context(), &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "a-pod-to-delete"},
Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}},
}, metav1.CreateOptions{})
- podsDeleteNilNamespace, err := c.callTool("pods_delete", map[string]interface{}{
+ podsDeleteNilNamespace, err := s.CallTool("pods_delete", map[string]interface{}{
"name": "a-pod-to-delete",
})
- t.Run("pods_delete with name and nil namespace returns success", func(t *testing.T) {
- if err != nil {
- t.Errorf("call tool failed %v", err)
- return
- }
- if podsDeleteNilNamespace.IsError {
- t.Errorf("call tool failed")
- return
- }
- if podsDeleteNilNamespace.Content[0].(mcp.TextContent).Text != "Pod deleted successfully" {
- t.Errorf("invalid tool result content, got %v", podsDeleteNilNamespace.Content[0].(mcp.TextContent).Text)
- return
- }
+ s.Run("returns success", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(podsDeleteNilNamespace.IsError, "call tool failed")
+ s.Equalf("Pod deleted successfully", podsDeleteNilNamespace.Content[0].(mcp.TextContent).Text, "invalid tool result content, got %v", podsDeleteNilNamespace.Content[0].(mcp.TextContent).Text)
})
- t.Run("pods_delete with name and nil namespace deletes Pod", func(t *testing.T) {
- p, pErr := kc.CoreV1().Pods("default").Get(c.ctx, "a-pod-to-delete", metav1.GetOptions{})
- if pErr == nil && p != nil && p.DeletionTimestamp == nil {
- t.Errorf("Pod not deleted")
- return
- }
+ s.Run("deletes Pod", func() {
+ p, pErr := kc.CoreV1().Pods("default").Get(s.T().Context(), "a-pod-to-delete", metav1.GetOptions{})
+ s.Truef(pErr != nil || p == nil || p.DeletionTimestamp != nil, "Pod not deleted")
})
- // Provided Namespace
- _, _ = kc.CoreV1().Pods("ns-1").Create(c.ctx, &corev1.Pod{
+ })
+ s.Run("pods_delete(name=a-pod-to-delete-in-ns-1, namespace=ns-1)", func() {
+ kc := kubernetes.NewForConfigOrDie(envTestRestConfig)
+ _, _ = kc.CoreV1().Pods("ns-1").Create(s.T().Context(), &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "a-pod-to-delete-in-ns-1"},
Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}},
}, metav1.CreateOptions{})
- podsDeleteInNamespace, err := c.callTool("pods_delete", map[string]interface{}{
+ podsDeleteInNamespace, err := s.CallTool("pods_delete", map[string]interface{}{
"namespace": "ns-1",
"name": "a-pod-to-delete-in-ns-1",
})
- t.Run("pods_delete with name and namespace returns success", func(t *testing.T) {
- if err != nil {
- t.Errorf("call tool failed %v", err)
- return
- }
- if podsDeleteInNamespace.IsError {
- t.Errorf("call tool failed")
- return
- }
- if podsDeleteInNamespace.Content[0].(mcp.TextContent).Text != "Pod deleted successfully" {
- t.Errorf("invalid tool result content, got %v", podsDeleteInNamespace.Content[0].(mcp.TextContent).Text)
- return
- }
+ s.Run("returns success", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(podsDeleteInNamespace.IsError, "call tool failed")
+ s.Equalf("Pod deleted successfully", podsDeleteInNamespace.Content[0].(mcp.TextContent).Text, "invalid tool result content, got %v", podsDeleteInNamespace.Content[0].(mcp.TextContent).Text)
})
- t.Run("pods_delete with name and namespace deletes Pod", func(t *testing.T) {
- p, pErr := kc.CoreV1().Pods("ns-1").Get(c.ctx, "a-pod-to-delete-in-ns-1", metav1.GetOptions{})
- if pErr == nil && p != nil && p.DeletionTimestamp == nil {
- t.Errorf("Pod not deleted")
- return
- }
+ s.Run("deletes Pod", func() {
+ p, pErr := kc.CoreV1().Pods("ns-1").Get(s.T().Context(), "a-pod-to-delete-in-ns-1", metav1.GetOptions{})
+ s.Truef(pErr != nil || p == nil || p.DeletionTimestamp != nil, "Pod not deleted")
})
- // Managed Pod
+ })
+ s.Run("pods_delete(name=a-managed-pod-to-delete, namespace=ns-1) with managed pod", func() {
+ kc := kubernetes.NewForConfigOrDie(envTestRestConfig)
managedLabels := map[string]string{
"app.kubernetes.io/managed-by": "kubernetes-mcp-server",
"app.kubernetes.io/name": "a-manged-pod-to-delete",
}
- _, _ = kc.CoreV1().Pods("default").Create(c.ctx, &corev1.Pod{
+ _, _ = kc.CoreV1().Pods("default").Create(s.T().Context(), &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "a-managed-pod-to-delete", Labels: managedLabels},
Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}},
}, metav1.CreateOptions{})
- _, _ = kc.CoreV1().Services("default").Create(c.ctx, &corev1.Service{
+ _, _ = kc.CoreV1().Services("default").Create(s.T().Context(), &corev1.Service{
ObjectMeta: metav1.ObjectMeta{Name: "a-managed-service-to-delete", Labels: managedLabels},
Spec: corev1.ServiceSpec{Selector: managedLabels, Ports: []corev1.ServicePort{{Port: 80}}},
}, metav1.CreateOptions{})
- podsDeleteManaged, err := c.callTool("pods_delete", map[string]interface{}{
+ podsDeleteManaged, err := s.CallTool("pods_delete", map[string]interface{}{
"name": "a-managed-pod-to-delete",
})
- t.Run("pods_delete with managed pod returns success", func(t *testing.T) {
- if err != nil {
- t.Errorf("call tool failed %v", err)
- return
- }
- if podsDeleteManaged.IsError {
- t.Errorf("call tool failed")
- return
- }
- if podsDeleteManaged.Content[0].(mcp.TextContent).Text != "Pod deleted successfully" {
- t.Errorf("invalid tool result content, got %v", podsDeleteManaged.Content[0].(mcp.TextContent).Text)
- return
- }
+ s.Run("returns success", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(podsDeleteManaged.IsError, "call tool failed")
+ s.Equalf("Pod deleted successfully", podsDeleteManaged.Content[0].(mcp.TextContent).Text, "invalid tool result content, got %v", podsDeleteManaged.Content[0].(mcp.TextContent).Text)
})
- t.Run("pods_delete with managed pod deletes Pod and Service", func(t *testing.T) {
- p, pErr := kc.CoreV1().Pods("default").Get(c.ctx, "a-managed-pod-to-delete", metav1.GetOptions{})
- if pErr == nil && p != nil && p.DeletionTimestamp == nil {
- t.Errorf("Pod not deleted")
- return
- }
- s, sErr := kc.CoreV1().Services("default").Get(c.ctx, "a-managed-service-to-delete", metav1.GetOptions{})
- if sErr == nil && s != nil && s.DeletionTimestamp == nil {
- t.Errorf("Service not deleted")
- return
- }
+ s.Run("deletes Pod and Service", func() {
+ p, pErr := kc.CoreV1().Pods("default").Get(s.T().Context(), "a-managed-pod-to-delete", metav1.GetOptions{})
+ s.Truef(pErr != nil || p == nil || p.DeletionTimestamp != nil, "Pod not deleted")
+ svc, sErr := kc.CoreV1().Services("default").Get(s.T().Context(), "a-managed-service-to-delete", metav1.GetOptions{})
+ s.Truef(sErr != nil || svc == nil || svc.DeletionTimestamp != nil, "Service not deleted")
})
})
}
-func TestPodsDeleteDenied(t *testing.T) {
- deniedResourcesServer := test.Must(config.ReadToml([]byte(`
+func (s *PodsSuite) TestPodsDeleteDenied() {
+ s.Require().NoError(toml.Unmarshal([]byte(`
denied_resources = [ { version = "v1", kind = "Pod" } ]
- `)))
- testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) {
- c.withEnvTest()
- podsDelete, _ := c.callTool("pods_delete", map[string]interface{}{"name": "a-pod-in-default"})
- t.Run("pods_delete has error", func(t *testing.T) {
- if !podsDelete.IsError {
- t.Fatalf("call tool should fail")
- }
- })
- t.Run("pods_delete describes denial", func(t *testing.T) {
+ `), s.Cfg), "Expected to parse denied resources config")
+ s.InitMcpClient()
+ s.Run("pods_delete (denied)", func() {
+ podsDelete, err := s.CallTool("pods_delete", map[string]interface{}{"name": "a-pod-in-default"})
+ s.Run("has error", func() {
+ s.Truef(podsDelete.IsError, "call tool should fail")
+ s.Nilf(err, "call tool should not return error object")
+ })
+ s.Run("describes denial", func() {
expectedMessage := "failed to delete pod a-pod-in-default in namespace : resource not allowed: /v1, Kind=Pod"
- if podsDelete.Content[0].(mcp.TextContent).Text != expectedMessage {
- t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, podsDelete.Content[0].(mcp.TextContent).Text)
- }
+ s.Equalf(expectedMessage, podsDelete.Content[0].(mcp.TextContent).Text,
+ "expected descriptive error '%s', got %v", expectedMessage, podsDelete.Content[0].(mcp.TextContent).Text)
})
})
}
-func TestPodsDeleteInOpenShift(t *testing.T) {
- testCaseWithContext(t, &mcpContext{before: inOpenShift, after: inOpenShiftClear}, func(c *mcpContext) {
+func (s *PodsSuite) TestPodsDeleteInOpenShift() {
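+ // Mark the envtest environment as OpenShift for this test; the cleanup below restores the plain Kubernetes setup.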
+ s.Require().NoError(EnvTestInOpenShift(s.T().Context()), "Expected to configure test for OpenShift")
+ s.T().Cleanup(func() {
+ s.Require().NoError(EnvTestInOpenShiftClear(s.T().Context()), "Expected to clear OpenShift test configuration")
+ })
+ s.InitMcpClient()
+
+ s.Run("pods_delete with managed pod in OpenShift", func() {
managedLabels := map[string]string{
"app.kubernetes.io/managed-by": "kubernetes-mcp-server",
"app.kubernetes.io/name": "a-manged-pod-to-delete",
}
- kc := c.newKubernetesClient()
- _, _ = kc.CoreV1().Pods("default").Create(c.ctx, &corev1.Pod{
+ kc := kubernetes.NewForConfigOrDie(envTestRestConfig)
+ _, _ = kc.CoreV1().Pods("default").Create(s.T().Context(), &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "a-managed-pod-to-delete-in-openshift", Labels: managedLabels},
Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}},
}, metav1.CreateOptions{})
dynamicClient := dynamic.NewForConfigOrDie(envTestRestConfig)
_, _ = dynamicClient.Resource(schema.GroupVersionResource{Group: "route.openshift.io", Version: "v1", Resource: "routes"}).
- Namespace("default").Create(c.ctx, &unstructured.Unstructured{Object: map[string]interface{}{
+ Namespace("default").Create(s.T().Context(), &unstructured.Unstructured{Object: map[string]interface{}{
"apiVersion": "route.openshift.io/v1",
"kind": "Route",
"metadata": map[string]interface{}{
@@ -610,519 +481,212 @@ func TestPodsDeleteInOpenShift(t *testing.T) {
"labels": managedLabels,
},
}}, metav1.CreateOptions{})
- podsDeleteManagedOpenShift, err := c.callTool("pods_delete", map[string]interface{}{
+ podsDeleteManagedOpenShift, err := s.CallTool("pods_delete", map[string]interface{}{
"name": "a-managed-pod-to-delete-in-openshift",
})
- t.Run("pods_delete with managed pod in OpenShift returns success", func(t *testing.T) {
- if err != nil {
- t.Errorf("call tool failed %v", err)
- return
- }
- if podsDeleteManagedOpenShift.IsError {
- t.Errorf("call tool failed")
- return
- }
- if podsDeleteManagedOpenShift.Content[0].(mcp.TextContent).Text != "Pod deleted successfully" {
- t.Errorf("invalid tool result content, got %v", podsDeleteManagedOpenShift.Content[0].(mcp.TextContent).Text)
- return
- }
+ s.Run("returns success", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(podsDeleteManagedOpenShift.IsError, "call tool failed")
+ s.Equalf("Pod deleted successfully", podsDeleteManagedOpenShift.Content[0].(mcp.TextContent).Text,
+ "invalid tool result content, got %v", podsDeleteManagedOpenShift.Content[0].(mcp.TextContent).Text)
})
- t.Run("pods_delete with managed pod in OpenShift deletes Pod and Route", func(t *testing.T) {
- p, pErr := kc.CoreV1().Pods("default").Get(c.ctx, "a-managed-pod-to-delete-in-openshift", metav1.GetOptions{})
- if pErr == nil && p != nil && p.DeletionTimestamp == nil {
- t.Errorf("Pod not deleted")
- return
- }
+ s.Run("deletes Pod and Route", func() {
+ p, pErr := kc.CoreV1().Pods("default").Get(s.T().Context(), "a-managed-pod-to-delete-in-openshift", metav1.GetOptions{})
+ s.False(pErr == nil && p != nil && p.DeletionTimestamp == nil, "Pod not deleted")
r, rErr := dynamicClient.
Resource(schema.GroupVersionResource{Group: "route.openshift.io", Version: "v1", Resource: "routes"}).
- Namespace("default").Get(c.ctx, "a-managed-route-to-delete", metav1.GetOptions{})
- if rErr == nil && r != nil && r.GetDeletionTimestamp() == nil {
- t.Errorf("Route not deleted")
- return
- }
+ Namespace("default").Get(s.T().Context(), "a-managed-route-to-delete", metav1.GetOptions{})
+ s.False(rErr == nil && r != nil && r.GetDeletionTimestamp() == nil, "Route not deleted")
})
})
}
-func TestPodsLog(t *testing.T) {
- testCase(t, func(c *mcpContext) {
- c.withEnvTest()
- t.Run("pods_log with nil name returns error", func(t *testing.T) {
- toolResult, _ := c.callTool("pods_log", map[string]interface{}{})
- if toolResult.IsError != true {
- t.Fatalf("call tool should fail")
- return
- }
- if toolResult.Content[0].(mcp.TextContent).Text != "failed to get pod log, missing argument name" {
- t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
- return
- }
- })
- t.Run("pods_log with not found name returns error", func(t *testing.T) {
- toolResult, _ := c.callTool("pods_log", map[string]interface{}{"name": "not-found"})
- if toolResult.IsError != true {
- t.Fatalf("call tool should fail")
- return
- }
- if toolResult.Content[0].(mcp.TextContent).Text != "failed to get pod not-found log in namespace : pods \"not-found\" not found" {
- t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
- return
- }
- })
- podsLogNilNamespace, err := c.callTool("pods_log", map[string]interface{}{
+func (s *PodsSuite) TestPodsLog() {
+ s.InitMcpClient()
+ s.Run("pods_log with nil name returns error", func() {
+ toolResult, _ := s.CallTool("pods_log", map[string]interface{}{})
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Equalf("failed to get pod log, missing argument name", toolResult.Content[0].(mcp.TextContent).Text, "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ s.Run("pods_log with not found name returns error", func() {
+ toolResult, _ := s.CallTool("pods_log", map[string]interface{}{"name": "not-found"})
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Equalf("failed to get pod not-found log in namespace : pods \"not-found\" not found", toolResult.Content[0].(mcp.TextContent).Text, "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ s.Run("pods_log(name=a-pod-in-default, namespace=nil), uses configured namespace", func() {
+ podsLogNilNamespace, err := s.CallTool("pods_log", map[string]interface{}{
"name": "a-pod-in-default",
})
- t.Run("pods_log with name and nil namespace returns pod log", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- return
- }
- if podsLogNilNamespace.IsError {
- t.Fatalf("call tool failed")
- return
- }
- })
- podsLogInNamespace, err := c.callTool("pods_log", map[string]interface{}{
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(podsLogNilNamespace.IsError, "call tool failed")
+ })
+ s.Run("pods_log(name=a-pod-in-ns-1, namespace=ns-1)", func() {
+ podsLogInNamespace, err := s.CallTool("pods_log", map[string]interface{}{
"namespace": "ns-1",
"name": "a-pod-in-ns-1",
})
- t.Run("pods_log with name and namespace returns pod log", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- return
- }
- if podsLogInNamespace.IsError {
- t.Fatalf("call tool failed")
- return
- }
- })
- podsContainerLogInNamespace, err := c.callTool("pods_log", map[string]interface{}{
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(podsLogInNamespace.IsError, "call tool failed")
+ })
+ s.Run("pods_log(name=a-pod-in-ns-1, namespace=ns-1, container=nginx)", func() {
+ podsContainerLogInNamespace, err := s.CallTool("pods_log", map[string]interface{}{
"namespace": "ns-1",
"name": "a-pod-in-ns-1",
"container": "nginx",
})
- t.Run("pods_log with name, container and namespace returns pod log", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- return
- }
- if podsContainerLogInNamespace.IsError {
- t.Fatalf("call tool failed")
- return
- }
- })
- toolResult, err := c.callTool("pods_log", map[string]interface{}{
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(podsContainerLogInNamespace.IsError, "call tool failed")
+ })
+ s.Run("with non existing container returns error", func() {
+ toolResult, err := s.CallTool("pods_log", map[string]interface{}{
"namespace": "ns-1",
"name": "a-pod-in-ns-1",
"container": "a-not-existing-container",
})
- t.Run("pods_log with non existing container returns error", func(t *testing.T) {
- if toolResult.IsError != true {
- t.Fatalf("call tool should fail")
- return
- }
- if toolResult.Content[0].(mcp.TextContent).Text != "failed to get pod a-pod-in-ns-1 log in namespace ns-1: container a-not-existing-container is not valid for pod a-pod-in-ns-1" {
- t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
- return
- }
- })
- podsPreviousLogInNamespace, err := c.callTool("pods_log", map[string]interface{}{
+ s.Nilf(err, "call tool should not return error object")
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Equalf("failed to get pod a-pod-in-ns-1 log in namespace ns-1: container a-not-existing-container is not valid for pod a-pod-in-ns-1", toolResult.Content[0].(mcp.TextContent).Text, "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ s.Run("pods_log(previous=true) returns previous pod log", func() {
+ podsPreviousLogInNamespace, err := s.CallTool("pods_log", map[string]interface{}{
"namespace": "ns-1",
"name": "a-pod-in-ns-1",
"previous": true,
})
- t.Run("pods_log with previous=true returns previous pod log", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- return
- }
- if podsPreviousLogInNamespace.IsError {
- t.Fatalf("call tool failed")
- return
- }
- })
- podsPreviousLogFalse, err := c.callTool("pods_log", map[string]interface{}{
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(podsPreviousLogInNamespace.IsError, "call tool failed")
+ })
+ s.Run("pods_log(previous=false) returns current pod log", func() {
+ podsPreviousLogFalse, err := s.CallTool("pods_log", map[string]interface{}{
"namespace": "ns-1",
"name": "a-pod-in-ns-1",
"previous": false,
})
- t.Run("pods_log with previous=false returns current pod log", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- return
- }
- if podsPreviousLogFalse.IsError {
- t.Fatalf("call tool failed")
- return
- }
- })
-
- // Test with tail parameter
- podsTailLines, err := c.callTool("pods_log", map[string]interface{}{
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(podsPreviousLogFalse.IsError, "call tool failed")
+ })
+ s.Run("pods_log(tail=50) returns pod log", func() {
+ podsTailLines, err := s.CallTool("pods_log", map[string]interface{}{
"namespace": "ns-1",
"name": "a-pod-in-ns-1",
"tail": 50,
})
- t.Run("pods_log with tail=50 returns pod log", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- return
- }
- if podsTailLines.IsError {
- t.Fatalf("call tool failed")
- return
- }
- })
-
- // Test with invalid tail parameter
- podsInvalidTailLines, _ := c.callTool("pods_log", map[string]interface{}{
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(podsTailLines.IsError, "call tool failed")
+ })
+ s.Run("with invalid tail returns error", func() {
+ podsInvalidTailLines, _ := s.CallTool("pods_log", map[string]interface{}{
"namespace": "ns-1",
"name": "a-pod-in-ns-1",
"tail": "invalid",
})
- t.Run("pods_log with invalid tail returns error", func(t *testing.T) {
- if !podsInvalidTailLines.IsError {
- t.Fatalf("call tool should fail")
- return
- }
- expectedErrorMsg := "failed to parse tail parameter: expected integer"
- if errMsg := podsInvalidTailLines.Content[0].(mcp.TextContent).Text; !strings.Contains(errMsg, expectedErrorMsg) {
- t.Fatalf("unexpected error message, expected to contain '%s', got '%s'", expectedErrorMsg, errMsg)
- return
- }
- })
+ s.Truef(podsInvalidTailLines.IsError, "call tool should fail")
+ expectedErrorMsg := "failed to parse tail parameter: expected integer"
+ errMsg := podsInvalidTailLines.Content[0].(mcp.TextContent).Text
+ s.Containsf(errMsg, expectedErrorMsg, "unexpected error message, expected to contain '%s', got '%s'", expectedErrorMsg, errMsg)
})
}
-func TestPodsLogDenied(t *testing.T) {
- deniedResourcesServer := test.Must(config.ReadToml([]byte(`
+func (s *PodsSuite) TestPodsLogDenied() {
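+ // As in TestPodsDeleteDenied, deny v1 Pod resources before initializing the MCP client.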
+ s.Require().NoError(toml.Unmarshal([]byte(`
denied_resources = [ { version = "v1", kind = "Pod" } ]
- `)))
- testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) {
- c.withEnvTest()
- podsLog, _ := c.callTool("pods_log", map[string]interface{}{"name": "a-pod-in-default"})
- t.Run("pods_log has error", func(t *testing.T) {
- if !podsLog.IsError {
- t.Fatalf("call tool should fail")
- }
- })
- t.Run("pods_log describes denial", func(t *testing.T) {
+ `), s.Cfg), "Expected to parse denied resources config")
+ s.InitMcpClient()
+ s.Run("pods_log (denied)", func() {
+ podsLog, err := s.CallTool("pods_log", map[string]interface{}{"name": "a-pod-in-default"})
+ s.Run("has error", func() {
+ s.Truef(podsLog.IsError, "call tool should fail")
+ s.Nilf(err, "call tool should not return error object")
+ })
+ s.Run("describes denial", func() {
expectedMessage := "failed to get pod a-pod-in-default log in namespace : resource not allowed: /v1, Kind=Pod"
- if podsLog.Content[0].(mcp.TextContent).Text != expectedMessage {
- t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, podsLog.Content[0].(mcp.TextContent).Text)
- }
+ s.Equalf(expectedMessage, podsLog.Content[0].(mcp.TextContent).Text,
+ "expected descriptive error '%s', got %v", expectedMessage, podsLog.Content[0].(mcp.TextContent).Text)
})
})
}
-func TestPodsRun(t *testing.T) {
- testCase(t, func(c *mcpContext) {
- c.withEnvTest()
- t.Run("pods_run with nil image returns error", func(t *testing.T) {
- toolResult, _ := c.callTool("pods_run", map[string]interface{}{})
- if toolResult.IsError != true {
- t.Errorf("call tool should fail")
- return
- }
- if toolResult.Content[0].(mcp.TextContent).Text != "failed to run pod, missing argument image" {
- t.Errorf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
- return
- }
- })
- podsRunNilNamespace, err := c.callTool("pods_run", map[string]interface{}{"image": "nginx"})
- t.Run("pods_run with image and nil namespace runs pod", func(t *testing.T) {
- if err != nil {
- t.Errorf("call tool failed %v", err)
- return
- }
- if podsRunNilNamespace.IsError {
- t.Errorf("call tool failed")
- return
- }
- })
- var decodedNilNamespace []unstructured.Unstructured
- err = yaml.Unmarshal([]byte(podsRunNilNamespace.Content[0].(mcp.TextContent).Text), &decodedNilNamespace)
- t.Run("pods_run with image and nil namespace has yaml content", func(t *testing.T) {
- if err != nil {
- t.Errorf("invalid tool result content %v", err)
- return
- }
- })
- t.Run("pods_run with image and nil namespace returns 1 item (Pod)", func(t *testing.T) {
- if len(decodedNilNamespace) != 1 {
- t.Errorf("invalid pods count, expected 1, got %v", len(decodedNilNamespace))
- return
- }
- if decodedNilNamespace[0].GetKind() != "Pod" {
- t.Errorf("invalid pod kind, expected Pod, got %v", decodedNilNamespace[0].GetKind())
- return
- }
- })
- t.Run("pods_run with image and nil namespace returns pod in default", func(t *testing.T) {
- if decodedNilNamespace[0].GetNamespace() != "default" {
- t.Errorf("invalid pod namespace, expected default, got %v", decodedNilNamespace[0].GetNamespace())
- return
- }
+func (s *PodsSuite) TestPodsListWithLabelSelector() {
+ s.InitMcpClient()
+ kc := kubernetes.NewForConfigOrDie(envTestRestConfig)
+ // Create pods with labels
+ _, _ = kc.CoreV1().Pods("default").Create(s.T().Context(), &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "pod-with-labels",
+ Labels: map[string]string{"app": "test", "env": "dev"},
+ },
+ Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}},
+ }, metav1.CreateOptions{})
+ _, _ = kc.CoreV1().Pods("ns-1").Create(s.T().Context(), &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "another-pod-with-labels",
+ Labels: map[string]string{"app": "test", "env": "prod"},
+ },
+ Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}},
+ }, metav1.CreateOptions{})
+
+ s.Run("pods_list(labelSelector=app=test) returns filtered pods from configured namespace", func() {
+ toolResult, err := s.CallTool("pods_list", map[string]interface{}{
+ "labelSelector": "app=test",
})
- t.Run("pods_run with image and nil namespace returns pod with random name", func(t *testing.T) {
- if !strings.HasPrefix(decodedNilNamespace[0].GetName(), "kubernetes-mcp-server-run-") {
- t.Errorf("invalid pod name, expected random, got %v", decodedNilNamespace[0].GetName())
- return
- }
+ s.Run("no error", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(toolResult.IsError, "call tool failed")
})
- t.Run("pods_run with image and nil namespace returns pod with labels", func(t *testing.T) {
- labels := decodedNilNamespace[0].Object["metadata"].(map[string]interface{})["labels"].(map[string]interface{})
- if labels["app.kubernetes.io/name"] == "" {
- t.Errorf("invalid labels, expected app.kubernetes.io/name, got %v", labels)
- return
- }
- if labels["app.kubernetes.io/component"] == "" {
- t.Errorf("invalid labels, expected app.kubernetes.io/component, got %v", labels)
- return
- }
- if labels["app.kubernetes.io/managed-by"] != "kubernetes-mcp-server" {
- t.Errorf("invalid labels, expected app.kubernetes.io/managed-by, got %v", labels)
- return
- }
- if labels["app.kubernetes.io/part-of"] != "kubernetes-mcp-server-run-sandbox" {
- t.Errorf("invalid labels, expected app.kubernetes.io/part-of, got %v", labels)
- return
- }
+ var decoded []unstructured.Unstructured
+ err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded)
+ s.Run("has yaml content", func() {
+ s.Nilf(err, "invalid tool result content %v", err)
})
- t.Run("pods_run with image and nil namespace returns pod with nginx container", func(t *testing.T) {
- containers := decodedNilNamespace[0].Object["spec"].(map[string]interface{})["containers"].([]interface{})
- if containers[0].(map[string]interface{})["image"] != "nginx" {
- t.Errorf("invalid container name, expected nginx, got %v", containers[0].(map[string]interface{})["image"])
- return
- }
+ s.Run("returns 2 pods", func() {
+ s.Lenf(decoded, 2, "invalid pods count, expected 2, got %v", len(decoded))
})
+ })
- podsRunNamespaceAndPort, err := c.callTool("pods_run", map[string]interface{}{"image": "nginx", "port": 80})
- t.Run("pods_run with image, namespace, and port runs pod", func(t *testing.T) {
- if err != nil {
- t.Errorf("call tool failed %v", err)
- return
- }
- if podsRunNamespaceAndPort.IsError {
- t.Errorf("call tool failed")
- return
- }
+ s.Run("pods_list_in_namespace(labelSelector=env=prod, namespace=ns-1) returns filtered pods", func() {
+ toolResult, err := s.CallTool("pods_list_in_namespace", map[string]interface{}{
+ "namespace": "ns-1",
+ "labelSelector": "env=prod",
})
- var decodedNamespaceAndPort []unstructured.Unstructured
- err = yaml.Unmarshal([]byte(podsRunNamespaceAndPort.Content[0].(mcp.TextContent).Text), &decodedNamespaceAndPort)
- t.Run("pods_run with image, namespace, and port has yaml content", func(t *testing.T) {
- if err != nil {
- t.Errorf("invalid tool result content %v", err)
- return
- }
+ s.Run("no error", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(toolResult.IsError, "call tool failed")
})
- t.Run("pods_run with image, namespace, and port returns 2 items (Pod + Service)", func(t *testing.T) {
- if len(decodedNamespaceAndPort) != 2 {
- t.Errorf("invalid pods count, expected 2, got %v", len(decodedNamespaceAndPort))
- return
- }
- if decodedNamespaceAndPort[0].GetKind() != "Pod" {
- t.Errorf("invalid pod kind, expected Pod, got %v", decodedNamespaceAndPort[0].GetKind())
- return
- }
- if decodedNamespaceAndPort[1].GetKind() != "Service" {
- t.Errorf("invalid service kind, expected Service, got %v", decodedNamespaceAndPort[1].GetKind())
- return
- }
+ var decoded []unstructured.Unstructured
+ err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded)
+ s.Run("has yaml content", func() {
+ s.Nilf(err, "invalid tool result content %v", err)
})
- t.Run("pods_run with image, namespace, and port returns pod with port", func(t *testing.T) {
- containers := decodedNamespaceAndPort[0].Object["spec"].(map[string]interface{})["containers"].([]interface{})
- ports := containers[0].(map[string]interface{})["ports"].([]interface{})
- if ports[0].(map[string]interface{})["containerPort"] != int64(80) {
- t.Errorf("invalid container port, expected 80, got %v", ports[0].(map[string]interface{})["containerPort"])
- return
- }
+ s.Run("returns 1 pod", func() {
+ s.Lenf(decoded, 1, "invalid pods count, expected 1, got %v", len(decoded))
})
- t.Run("pods_run with image, namespace, and port returns service with port and selector", func(t *testing.T) {
- ports := decodedNamespaceAndPort[1].Object["spec"].(map[string]interface{})["ports"].([]interface{})
- if ports[0].(map[string]interface{})["port"] != int64(80) {
- t.Errorf("invalid service port, expected 80, got %v", ports[0].(map[string]interface{})["port"])
- return
- }
- if ports[0].(map[string]interface{})["targetPort"] != int64(80) {
- t.Errorf("invalid service target port, expected 80, got %v", ports[0].(map[string]interface{})["targetPort"])
- return
- }
- selector := decodedNamespaceAndPort[1].Object["spec"].(map[string]interface{})["selector"].(map[string]interface{})
- if selector["app.kubernetes.io/name"] == "" {
- t.Errorf("invalid service selector, expected app.kubernetes.io/name, got %v", selector)
- return
- }
- if selector["app.kubernetes.io/managed-by"] != "kubernetes-mcp-server" {
- t.Errorf("invalid service selector, expected app.kubernetes.io/managed-by, got %v", selector)
- return
- }
- if selector["app.kubernetes.io/part-of"] != "kubernetes-mcp-server-run-sandbox" {
- t.Errorf("invalid service selector, expected app.kubernetes.io/part-of, got %v", selector)
- return
- }
+ s.Run("returns another-pod-with-labels", func() {
+ s.Equalf("another-pod-with-labels", decoded[0].GetName(), "invalid pod name, expected another-pod-with-labels, got %v", decoded[0].GetName())
})
})
-}
-func TestPodsRunDenied(t *testing.T) {
- deniedResourcesServer := test.Must(config.ReadToml([]byte(`
- denied_resources = [ { version = "v1", kind = "Pod" } ]
- `)))
- testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) {
- c.withEnvTest()
- podsRun, _ := c.callTool("pods_run", map[string]interface{}{"image": "nginx"})
- t.Run("pods_run has error", func(t *testing.T) {
- if !podsRun.IsError {
- t.Fatalf("call tool should fail")
- }
+ s.Run("pods_list(labelSelector=app=test,env=prod) with multiple label selectors returns filtered pods", func() {
+ toolResult, err := s.CallTool("pods_list", map[string]interface{}{
+ "labelSelector": "app=test,env=prod",
})
- t.Run("pods_run describes denial", func(t *testing.T) {
- expectedMessage := "failed to run pod in namespace : resource not allowed: /v1, Kind=Pod"
- if podsRun.Content[0].(mcp.TextContent).Text != expectedMessage {
- t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, podsRun.Content[0].(mcp.TextContent).Text)
- }
- })
- })
-}
-
-func TestPodsRunInOpenShift(t *testing.T) {
- testCaseWithContext(t, &mcpContext{before: inOpenShift, after: inOpenShiftClear}, func(c *mcpContext) {
- t.Run("pods_run with image, namespace, and port returns route with port", func(t *testing.T) {
- podsRunInOpenShift, err := c.callTool("pods_run", map[string]interface{}{"image": "nginx", "port": 80})
- if err != nil {
- t.Errorf("call tool failed %v", err)
- return
- }
- if podsRunInOpenShift.IsError {
- t.Errorf("call tool failed")
- return
- }
- var decodedPodServiceRoute []unstructured.Unstructured
- err = yaml.Unmarshal([]byte(podsRunInOpenShift.Content[0].(mcp.TextContent).Text), &decodedPodServiceRoute)
- if err != nil {
- t.Errorf("invalid tool result content %v", err)
- return
- }
- if len(decodedPodServiceRoute) != 3 {
- t.Errorf("invalid pods count, expected 3, got %v", len(decodedPodServiceRoute))
- return
- }
- if decodedPodServiceRoute[2].GetKind() != "Route" {
- t.Errorf("invalid route kind, expected Route, got %v", decodedPodServiceRoute[2].GetKind())
- return
- }
- targetPort := decodedPodServiceRoute[2].Object["spec"].(map[string]interface{})["port"].(map[string]interface{})["targetPort"].(int64)
- if targetPort != 80 {
- t.Errorf("invalid route target port, expected 80, got %v", targetPort)
- return
- }
+ s.Run("no error", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(toolResult.IsError, "call tool failed")
})
- })
-}
-
-func TestPodsListWithLabelSelector(t *testing.T) {
- testCase(t, func(c *mcpContext) {
- c.withEnvTest()
- kc := c.newKubernetesClient()
- // Create pods with labels
- _, _ = kc.CoreV1().Pods("default").Create(c.ctx, &corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pod-with-labels",
- Labels: map[string]string{"app": "test", "env": "dev"},
- },
- Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}},
- }, metav1.CreateOptions{})
- _, _ = kc.CoreV1().Pods("ns-1").Create(c.ctx, &corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Name: "another-pod-with-labels",
- Labels: map[string]string{"app": "test", "env": "prod"},
- },
- Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}},
- }, metav1.CreateOptions{})
-
- // Test pods_list with label selector
- t.Run("pods_list with label selector returns filtered pods", func(t *testing.T) {
- toolResult, err := c.callTool("pods_list", map[string]interface{}{
- "labelSelector": "app=test",
- })
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- return
- }
- if toolResult.IsError {
- t.Fatalf("call tool failed")
- return
- }
- var decoded []unstructured.Unstructured
- err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded)
- if err != nil {
- t.Fatalf("invalid tool result content %v", err)
- return
- }
- if len(decoded) != 2 {
- t.Fatalf("invalid pods count, expected 2, got %v", len(decoded))
- return
- }
+ var decoded []unstructured.Unstructured
+ err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded)
+ s.Run("has yaml content", func() {
+ s.Nilf(err, "invalid tool result content %v", err)
})
-
- // Test pods_list_in_namespace with label selector
- t.Run("pods_list_in_namespace with label selector returns filtered pods", func(t *testing.T) {
- toolResult, err := c.callTool("pods_list_in_namespace", map[string]interface{}{
- "namespace": "ns-1",
- "labelSelector": "env=prod",
- })
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- return
- }
- if toolResult.IsError {
- t.Fatalf("call tool failed")
- return
- }
- var decoded []unstructured.Unstructured
- err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded)
- if err != nil {
- t.Fatalf("invalid tool result content %v", err)
- return
- }
- if len(decoded) != 1 {
- t.Fatalf("invalid pods count, expected 1, got %v", len(decoded))
- return
- }
- if decoded[0].GetName() != "another-pod-with-labels" {
- t.Fatalf("invalid pod name, expected another-pod-with-labels, got %v", decoded[0].GetName())
- return
- }
+ s.Run("returns 1 pod", func() {
+ s.Lenf(decoded, 1, "invalid pods count, expected 1, got %v", len(decoded))
})
-
- // Test multiple label selectors
- t.Run("pods_list with multiple label selectors returns filtered pods", func(t *testing.T) {
- toolResult, err := c.callTool("pods_list", map[string]interface{}{
- "labelSelector": "app=test,env=prod",
- })
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- return
- }
- if toolResult.IsError {
- t.Fatalf("call tool failed")
- return
- }
- var decoded []unstructured.Unstructured
- err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded)
- if err != nil {
- t.Fatalf("invalid tool result content %v", err)
- return
- }
- if len(decoded) != 1 {
- t.Fatalf("invalid pods count, expected 1, got %v", len(decoded))
- return
- }
- if decoded[0].GetName() != "another-pod-with-labels" {
- t.Fatalf("invalid pod name, expected another-pod-with-labels, got %v", decoded[0].GetName())
- return
- }
+ s.Run("returns another-pod-with-labels", func() {
+ s.Equalf("another-pod-with-labels", decoded[0].GetName(), "invalid pod name, expected another-pod-with-labels, got %v", decoded[0].GetName())
})
})
}
+
+func TestPods(t *testing.T) {
+ suite.Run(t, new(PodsSuite))
+}
diff --git a/pkg/mcp/pods_top_test.go b/pkg/mcp/pods_top_test.go
index 9fd218bb..92f6505a 100644
--- a/pkg/mcp/pods_top_test.go
+++ b/pkg/mcp/pods_top_test.go
@@ -5,247 +5,246 @@ import (
"regexp"
"testing"
+ "github.com/BurntSushi/toml"
"github.com/containers/kubernetes-mcp-server/internal/test"
"github.com/mark3labs/mcp-go/mcp"
-
- "github.com/containers/kubernetes-mcp-server/pkg/config"
+ "github.com/stretchr/testify/suite"
)
-func TestPodsTopMetricsUnavailable(t *testing.T) {
- testCase(t, func(c *mcpContext) {
- mockServer := test.NewMockServer()
- defer mockServer.Close()
- c.withKubeConfig(mockServer.Config())
- mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
- w.Header().Set("Content-Type", "application/json")
- // Request Performed by DiscoveryClient to Kube API (Get API Groups legacy -core-)
- if req.URL.Path == "/api" {
- _, _ = w.Write([]byte(`{"kind":"APIVersions","versions":[],"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0"}]}`))
- return
- }
- // Request Performed by DiscoveryClient to Kube API (Get API Groups)
- if req.URL.Path == "/apis" {
- _, _ = w.Write([]byte(`{"kind":"APIGroupList","apiVersion":"v1","groups":[]}`))
- return
- }
- }))
- podsTopMetricsApiUnavailable, err := c.callTool("pods_top", map[string]interface{}{})
- t.Run("pods_top with metrics API not available", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- }
- if !podsTopMetricsApiUnavailable.IsError {
- t.Errorf("call tool should have returned an error")
- }
- if podsTopMetricsApiUnavailable.Content[0].(mcp.TextContent).Text != "failed to get pods top: metrics API is not available" {
- t.Errorf("call tool returned unexpected content: %s", podsTopMetricsApiUnavailable.Content[0].(mcp.TextContent).Text)
- }
- })
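+// PodsTopSuite pairs BaseMcpSuite with a mock API server so each test can stub discovery and metrics responses.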
+type PodsTopSuite struct {
+ BaseMcpSuite
+ mockServer *test.MockServer
+}
+
+func (s *PodsTopSuite) SetupTest() {
+ s.BaseMcpSuite.SetupTest()
+ s.mockServer = test.NewMockServer()
+ s.Cfg.KubeConfig = s.mockServer.KubeconfigFile(s.T())
+}
+
+func (s *PodsTopSuite) TearDownTest() {
+ s.BaseMcpSuite.TearDownTest()
+ if s.mockServer != nil {
+ s.mockServer.Close()
+ }
+}
+
+func (s *PodsTopSuite) TestPodsTopMetricsUnavailable() {
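+ // Serve a discovery API that advertises no metrics.k8s.io group, so the tool must report the metrics API as unavailable.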
+ s.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ // Request Performed by DiscoveryClient to Kube API (Get API Groups legacy -core-)
+ if req.URL.Path == "/api" {
+ _, _ = w.Write([]byte(`{"kind":"APIVersions","versions":[],"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0"}]}`))
+ return
+ }
+ // Request Performed by DiscoveryClient to Kube API (Get API Groups)
+ if req.URL.Path == "/apis" {
+ _, _ = w.Write([]byte(`{"kind":"APIGroupList","apiVersion":"v1","groups":[]}`))
+ return
+ }
+ }))
+ s.InitMcpClient()
+
+ s.Run("pods_top with metrics API not available", func() {
+ result, err := s.CallTool("pods_top", map[string]interface{}{})
+ s.Require().NoErrorf(err, "call tool failed %v", err)
+ s.True(result.IsError, "call tool should have returned an error")
+ s.Equalf("failed to get pods top: metrics API is not available", result.Content[0].(mcp.TextContent).Text,
+ "call tool returned unexpected content: %s", result.Content[0].(mcp.TextContent).Text)
})
}
-func TestPodsTopMetricsAvailable(t *testing.T) {
- testCase(t, func(c *mcpContext) {
- mockServer := test.NewMockServer()
- defer mockServer.Close()
- c.withKubeConfig(mockServer.Config())
- mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
- println("Request received:", req.Method, req.URL.Path) // TODO: REMOVE LINE
- w.Header().Set("Content-Type", "application/json")
- // Request Performed by DiscoveryClient to Kube API (Get API Groups legacy -core-)
- if req.URL.Path == "/api" {
- _, _ = w.Write([]byte(`{"kind":"APIVersions","versions":["metrics.k8s.io/v1beta1"],"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0"}]}`))
- return
- }
- // Request Performed by DiscoveryClient to Kube API (Get API Groups)
- if req.URL.Path == "/apis" {
- _, _ = w.Write([]byte(`{"kind":"APIGroupList","apiVersion":"v1","groups":[]}`))
- return
- }
- // Request Performed by DiscoveryClient to Kube API (Get API Resources)
- if req.URL.Path == "/apis/metrics.k8s.io/v1beta1" {
- _, _ = w.Write([]byte(`{"kind":"APIResourceList","apiVersion":"v1","groupVersion":"metrics.k8s.io/v1beta1","resources":[{"name":"pods","singularName":"","namespaced":true,"kind":"PodMetrics","verbs":["get","list"]}]}`))
- return
- }
- // Pod Metrics from all namespaces
- if req.URL.Path == "/apis/metrics.k8s.io/v1beta1/pods" {
- if req.URL.Query().Get("labelSelector") == "app=pod-ns-5-42" {
- _, _ = w.Write([]byte(`{"kind":"PodMetricsList","apiVersion":"metrics.k8s.io/v1beta1","items":[` +
- `{"metadata":{"name":"pod-ns-5-42","namespace":"ns-5"},"containers":[{"name":"container-1","usage":{"cpu":"42m","memory":"42Mi","swap":"42Mi"}}]}` +
- `]}`))
- } else {
- _, _ = w.Write([]byte(`{"kind":"PodMetricsList","apiVersion":"metrics.k8s.io/v1beta1","items":[` +
- `{"metadata":{"name":"pod-1","namespace":"default"},"containers":[{"name":"container-1","usage":{"cpu":"100m","memory":"200Mi","swap":"13Mi"}},{"name":"container-2","usage":{"cpu":"200m","memory":"300Mi","swap":"37Mi"}}]},` +
- `{"metadata":{"name":"pod-2","namespace":"ns-1"},"containers":[{"name":"container-1-ns-1","usage":{"cpu":"300m","memory":"400Mi","swap":"42Mi"}}]}` +
- `]}`))
-
- }
- return
- }
- // Pod Metrics from configured namespace
- if req.URL.Path == "/apis/metrics.k8s.io/v1beta1/namespaces/default/pods" {
+func (s *PodsTopSuite) TestPodsTopMetricsAvailable() {
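+ // Serve canned PodMetrics responses for cluster-wide, per-namespace, per-pod, and label-selector queries.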
+ s.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ // Request Performed by DiscoveryClient to Kube API (Get API Groups legacy -core-)
+ if req.URL.Path == "/api" {
+ _, _ = w.Write([]byte(`{"kind":"APIVersions","versions":["metrics.k8s.io/v1beta1"],"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0"}]}`))
+ return
+ }
+ // Request Performed by DiscoveryClient to Kube API (Get API Groups)
+ if req.URL.Path == "/apis" {
+ _, _ = w.Write([]byte(`{"kind":"APIGroupList","apiVersion":"v1","groups":[]}`))
+ return
+ }
+ // Request Performed by DiscoveryClient to Kube API (Get API Resources)
+ if req.URL.Path == "/apis/metrics.k8s.io/v1beta1" {
+ _, _ = w.Write([]byte(`{"kind":"APIResourceList","apiVersion":"v1","groupVersion":"metrics.k8s.io/v1beta1","resources":[{"name":"pods","singularName":"","namespaced":true,"kind":"PodMetrics","verbs":["get","list"]}]}`))
+ return
+ }
+ // Pod Metrics from all namespaces
+ if req.URL.Path == "/apis/metrics.k8s.io/v1beta1/pods" {
+ if req.URL.Query().Get("labelSelector") == "app=pod-ns-5-42" {
_, _ = w.Write([]byte(`{"kind":"PodMetricsList","apiVersion":"metrics.k8s.io/v1beta1","items":[` +
- `{"metadata":{"name":"pod-1","namespace":"default"},"containers":[{"name":"container-1","usage":{"cpu":"10m","memory":"20Mi","swap":"13Mi"}},{"name":"container-2","usage":{"cpu":"30m","memory":"40Mi","swap":"37Mi"}}]}` +
+ `{"metadata":{"name":"pod-ns-5-42","namespace":"ns-5"},"containers":[{"name":"container-1","usage":{"cpu":"42m","memory":"42Mi","swap":"42Mi"}}]}` +
`]}`))
- return
- }
- // Pod Metrics from ns-5 namespace
- if req.URL.Path == "/apis/metrics.k8s.io/v1beta1/namespaces/ns-5/pods" {
+ } else {
_, _ = w.Write([]byte(`{"kind":"PodMetricsList","apiVersion":"metrics.k8s.io/v1beta1","items":[` +
- `{"metadata":{"name":"pod-ns-5-1","namespace":"ns-5"},"containers":[{"name":"container-1","usage":{"cpu":"10m","memory":"20Mi","swap":"42Mi"}}]}` +
+ `{"metadata":{"name":"pod-1","namespace":"default"},"containers":[{"name":"container-1","usage":{"cpu":"100m","memory":"200Mi","swap":"13Mi"}},{"name":"container-2","usage":{"cpu":"200m","memory":"300Mi","swap":"37Mi"}}]},` +
+ `{"metadata":{"name":"pod-2","namespace":"ns-1"},"containers":[{"name":"container-1-ns-1","usage":{"cpu":"300m","memory":"400Mi","swap":"42Mi"}}]}` +
`]}`))
- return
- }
- // Pod Metrics from ns-5 namespace with pod-ns-5-5 pod name
- if req.URL.Path == "/apis/metrics.k8s.io/v1beta1/namespaces/ns-5/pods/pod-ns-5-5" {
- _, _ = w.Write([]byte(`{"kind":"PodMetrics","apiVersion":"metrics.k8s.io/v1beta1",` +
- `"metadata":{"name":"pod-ns-5-5","namespace":"ns-5"},` +
- `"containers":[{"name":"container-1","usage":{"cpu":"13m","memory":"37Mi","swap":"42Mi"}}]` +
- `}`))
- }
- }))
- podsTopDefaults, err := c.callTool("pods_top", map[string]interface{}{})
- t.Run("pods_top defaults returns pod metrics from all namespaces", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- }
- textContent := podsTopDefaults.Content[0].(mcp.TextContent).Text
- if podsTopDefaults.IsError {
- t.Fatalf("call tool failed %s", textContent)
- }
- expectedHeaders := regexp.MustCompile(`(?m)^\s*NAMESPACE\s+POD\s+NAME\s+CPU\(cores\)\s+MEMORY\(bytes\)\s+SWAP\(bytes\)\s*$`)
- if !expectedHeaders.MatchString(textContent) {
- t.Errorf("Expected headers '%s' not found in output:\n%s", expectedHeaders.String(), textContent)
- }
- expectedRows := []string{
- "default\\s+pod-1\\s+container-1\\s+100m\\s+200Mi\\s+13Mi",
- "default\\s+pod-1\\s+container-2\\s+200m\\s+300Mi\\s+37Mi",
- "ns-1\\s+pod-2\\s+container-1-ns-1\\s+300m\\s+400Mi\\s+42Mi",
- }
- for _, row := range expectedRows {
- if !regexp.MustCompile(row).MatchString(textContent) {
- t.Errorf("Expected row '%s' not found in output:\n%s", row, textContent)
- }
- }
- expectedTotal := regexp.MustCompile(`(?m)^\s+600m\s+900Mi\s+92Mi\s*$`)
- if !expectedTotal.MatchString(textContent) {
- t.Errorf("Expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent)
+
}
- })
- podsTopConfiguredNamespace, err := c.callTool("pods_top", map[string]interface{}{
+ return
+ }
+ // Pod Metrics from configured namespace
+ if req.URL.Path == "/apis/metrics.k8s.io/v1beta1/namespaces/default/pods" {
+ _, _ = w.Write([]byte(`{"kind":"PodMetricsList","apiVersion":"metrics.k8s.io/v1beta1","items":[` +
+ `{"metadata":{"name":"pod-1","namespace":"default"},"containers":[{"name":"container-1","usage":{"cpu":"10m","memory":"20Mi","swap":"13Mi"}},{"name":"container-2","usage":{"cpu":"30m","memory":"40Mi","swap":"37Mi"}}]}` +
+ `]}`))
+ return
+ }
+ // Pod Metrics from ns-5 namespace
+ if req.URL.Path == "/apis/metrics.k8s.io/v1beta1/namespaces/ns-5/pods" {
+ _, _ = w.Write([]byte(`{"kind":"PodMetricsList","apiVersion":"metrics.k8s.io/v1beta1","items":[` +
+ `{"metadata":{"name":"pod-ns-5-1","namespace":"ns-5"},"containers":[{"name":"container-1","usage":{"cpu":"10m","memory":"20Mi","swap":"42Mi"}}]}` +
+ `]}`))
+ return
+ }
+ // Pod Metrics from ns-5 namespace with pod-ns-5-5 pod name
+ if req.URL.Path == "/apis/metrics.k8s.io/v1beta1/namespaces/ns-5/pods/pod-ns-5-5" {
+ _, _ = w.Write([]byte(`{"kind":"PodMetrics","apiVersion":"metrics.k8s.io/v1beta1",` +
+ `"metadata":{"name":"pod-ns-5-5","namespace":"ns-5"},` +
+ `"containers":[{"name":"container-1","usage":{"cpu":"13m","memory":"37Mi","swap":"42Mi"}}]` +
+ `}`))
+ }
+ }))
+ s.InitMcpClient()
+
+ s.Run("pods_top(defaults) returns pod metrics from all namespaces", func() {
+ result, err := s.CallTool("pods_top", map[string]interface{}{})
+ s.Require().NotNil(result)
+ s.NoErrorf(err, "call tool failed %v", err)
+ textContent := result.Content[0].(mcp.TextContent).Text
+ s.Falsef(result.IsError, "call tool failed %v", textContent)
+
+ expectedHeaders := regexp.MustCompile(`(?m)^\s*NAMESPACE\s+POD\s+NAME\s+CPU\(cores\)\s+MEMORY\(bytes\)\s+SWAP\(bytes\)\s*$`)
+ s.Regexpf(expectedHeaders, textContent, "expected headers '%s' not found in output:\n%s", expectedHeaders.String(), textContent)
+ expectedRows := []string{
+ "default\\s+pod-1\\s+container-1\\s+100m\\s+200Mi\\s+13Mi",
+ "default\\s+pod-1\\s+container-2\\s+200m\\s+300Mi\\s+37Mi",
+ "ns-1\\s+pod-2\\s+container-1-ns-1\\s+300m\\s+400Mi\\s+42Mi",
+ }
+
+ for _, row := range expectedRows {
+ s.Regexpf(row, textContent, "expected row '%s' not found in output:\n%s", row, textContent)
+ }
+
+ expectedTotal := regexp.MustCompile(`(?m)^\s+600m\s+900Mi\s+92Mi\s*$`)
+ s.Regexpf(expectedTotal, textContent, "expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent)
+ })
+
+ s.Run("pods_top(allNamespaces=false) returns pod metrics from configured namespace", func() {
+ result, err := s.CallTool("pods_top", map[string]interface{}{
"all_namespaces": false,
})
- t.Run("pods_top[allNamespaces=false] returns pod metrics from configured namespace", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- }
- textContent := podsTopConfiguredNamespace.Content[0].(mcp.TextContent).Text
- expectedRows := []string{
- "default\\s+pod-1\\s+container-1\\s+10m\\s+20Mi\\s+13Mi",
- "default\\s+pod-1\\s+container-2\\s+30m\\s+40Mi\\s+37Mi",
- }
- for _, row := range expectedRows {
- if !regexp.MustCompile(row).MatchString(textContent) {
- t.Errorf("Expected row '%s' not found in output:\n%s", row, textContent)
- }
- }
- expectedTotal := regexp.MustCompile(`(?m)^\s+40m\s+60Mi\s+50Mi\s*$`)
- if !expectedTotal.MatchString(textContent) {
- t.Errorf("Expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent)
- }
- })
- podsTopNamespace, err := c.callTool("pods_top", map[string]interface{}{
+ s.Require().NotNil(result)
+ s.NoErrorf(err, "call tool failed %v", err)
+ textContent := result.Content[0].(mcp.TextContent).Text
+ s.Falsef(result.IsError, "call tool failed %v", textContent)
+
+ expectedRows := []string{
+ "default\\s+pod-1\\s+container-1\\s+10m\\s+20Mi\\s+13Mi",
+ "default\\s+pod-1\\s+container-2\\s+30m\\s+40Mi\\s+37Mi",
+ }
+ for _, row := range expectedRows {
+ s.Regexpf(row, textContent, "expected row '%s' not found in output:\n%s", row, textContent)
+ }
+
+ expectedTotal := regexp.MustCompile(`(?m)^\s+40m\s+60Mi\s+50Mi\s*$`)
+ s.Regexpf(expectedTotal, textContent, "expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent)
+ })
+
+ s.Run("pods_top(namespace=ns-5) returns pod metrics from provided namespace", func() {
+ result, err := s.CallTool("pods_top", map[string]interface{}{
"namespace": "ns-5",
})
- t.Run("pods_top[namespace=ns-5] returns pod metrics from provided namespace", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- }
- textContent := podsTopNamespace.Content[0].(mcp.TextContent).Text
- expectedRow := regexp.MustCompile(`ns-5\s+pod-ns-5-1\s+container-1\s+10m\s+20Mi\s+42Mi`)
- if !expectedRow.MatchString(textContent) {
- t.Errorf("Expected row '%s' not found in output:\n%s", expectedRow.String(), textContent)
- }
- expectedTotal := regexp.MustCompile(`(?m)^\s+10m\s+20Mi\s+42Mi\s*$`)
- if !expectedTotal.MatchString(textContent) {
- t.Errorf("Expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent)
- }
- })
- podsTopNamespaceName, err := c.callTool("pods_top", map[string]interface{}{
+ s.Require().NotNil(result)
+ s.NoErrorf(err, "call tool failed %v", err)
+ textContent := result.Content[0].(mcp.TextContent).Text
+ s.Falsef(result.IsError, "call tool failed %v", textContent)
+
+ expectedRow := regexp.MustCompile(`ns-5\s+pod-ns-5-1\s+container-1\s+10m\s+20Mi\s+42Mi`)
+ s.Regexpf(expectedRow, textContent, "expected row '%s' not found in output:\n%s", expectedRow.String(), textContent)
+
+ expectedTotal := regexp.MustCompile(`(?m)^\s+10m\s+20Mi\s+42Mi\s*$`)
+ s.Regexpf(expectedTotal, textContent, "expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent)
+ })
+
+ s.Run("pods_top(namespace=ns-5,name=pod-ns-5-5) returns pod metrics from provided namespace and name", func() {
+ result, err := s.CallTool("pods_top", map[string]interface{}{
"namespace": "ns-5",
"name": "pod-ns-5-5",
})
- t.Run("pods_top[namespace=ns-5,name=pod-ns-5-5] returns pod metrics from provided namespace and name", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- }
- textContent := podsTopNamespaceName.Content[0].(mcp.TextContent).Text
- expectedRow := regexp.MustCompile(`ns-5\s+pod-ns-5-5\s+container-1\s+13m\s+37Mi\s+42Mi`)
- if !expectedRow.MatchString(textContent) {
- t.Errorf("Expected row '%s' not found in output:\n%s", expectedRow.String(), textContent)
- }
- expectedTotal := regexp.MustCompile(`(?m)^\s+13m\s+37Mi\s+42Mi\s*$`)
- if !expectedTotal.MatchString(textContent) {
- t.Errorf("Expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent)
- }
- })
- podsTopNamespaceLabelSelector, err := c.callTool("pods_top", map[string]interface{}{
+ s.Require().NotNil(result)
+ s.NoErrorf(err, "call tool failed %v", err)
+ textContent := result.Content[0].(mcp.TextContent).Text
+ s.Falsef(result.IsError, "call tool failed %v", textContent)
+
+ expectedRow := regexp.MustCompile(`ns-5\s+pod-ns-5-5\s+container-1\s+13m\s+37Mi\s+42Mi`)
+ s.Regexpf(expectedRow, textContent, "expected row '%s' not found in output:\n%s", expectedRow.String(), textContent)
+
+ expectedTotal := regexp.MustCompile(`(?m)^\s+13m\s+37Mi\s+42Mi\s*$`)
+ s.Regexpf(expectedTotal, textContent, "expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent)
+ })
+
+ s.Run("pods_top[label_selector=app=pod-ns-5-42] returns pod metrics from pods matching selector", func() {
+ result, err := s.CallTool("pods_top", map[string]interface{}{
"label_selector": "app=pod-ns-5-42",
})
- t.Run("pods_top[label_selector=app=pod-ns-5-42] returns pod metrics from pods matching selector", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- }
- textContent := podsTopNamespaceLabelSelector.Content[0].(mcp.TextContent).Text
- expectedRow := regexp.MustCompile(`ns-5\s+pod-ns-5-42\s+container-1\s+42m\s+42Mi`)
- if !expectedRow.MatchString(textContent) {
- t.Errorf("Expected row '%s' not found in output:\n%s", expectedRow.String(), textContent)
- }
- expectedTotal := regexp.MustCompile(`(?m)^\s+42m\s+42Mi\s+42Mi\s*$`)
- if !expectedTotal.MatchString(textContent) {
- t.Errorf("Expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent)
- }
- })
+ s.Require().NotNil(result)
+ s.NoErrorf(err, "call tool failed %v", err)
+ textContent := result.Content[0].(mcp.TextContent).Text
+ s.Falsef(result.IsError, "call tool failed %v", textContent)
+
+ expectedRow := regexp.MustCompile(`ns-5\s+pod-ns-5-42\s+container-1\s+42m\s+42Mi`)
+ s.Regexpf(expectedRow, textContent, "expected row '%s' not found in output:\n%s", expectedRow.String(), textContent)
+
+ expectedTotal := regexp.MustCompile(`(?m)^\s+42m\s+42Mi\s+42Mi\s*$`)
+ s.Regexpf(expectedTotal, textContent, "expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent)
})
}
-func TestPodsTopDenied(t *testing.T) {
- deniedResourcesServer := test.Must(config.ReadToml([]byte(`
+func (s *PodsTopSuite) TestPodsTopDenied() {
+ s.Require().NoError(toml.Unmarshal([]byte(`
denied_resources = [ { group = "metrics.k8s.io", version = "v1beta1" } ]
- `)))
- testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) {
- mockServer := test.NewMockServer()
- defer mockServer.Close()
- c.withKubeConfig(mockServer.Config())
- mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
- w.Header().Set("Content-Type", "application/json")
- // Request Performed by DiscoveryClient to Kube API (Get API Groups legacy -core-)
- if req.URL.Path == "/api" {
- _, _ = w.Write([]byte(`{"kind":"APIVersions","versions":["metrics.k8s.io/v1beta1"],"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0"}]}`))
- return
- }
- // Request Performed by DiscoveryClient to Kube API (Get API Groups)
- if req.URL.Path == "/apis" {
- _, _ = w.Write([]byte(`{"kind":"APIGroupList","apiVersion":"v1","groups":[]}`))
- return
- }
- // Request Performed by DiscoveryClient to Kube API (Get API Resources)
- if req.URL.Path == "/apis/metrics.k8s.io/v1beta1" {
- _, _ = w.Write([]byte(`{"kind":"APIResourceList","apiVersion":"v1","groupVersion":"metrics.k8s.io/v1beta1","resources":[{"name":"pods","singularName":"","namespaced":true,"kind":"PodMetrics","verbs":["get","list"]}]}`))
- return
- }
- }))
- podsTop, _ := c.callTool("pods_top", map[string]interface{}{})
- t.Run("pods_run has error", func(t *testing.T) {
- if !podsTop.IsError {
- t.Fatalf("call tool should fail")
- }
+ `), s.Cfg), "Expected to parse denied resources config")
+ s.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ // Request Performed by DiscoveryClient to Kube API (Get API Groups legacy -core-)
+ if req.URL.Path == "/api" {
+ _, _ = w.Write([]byte(`{"kind":"APIVersions","versions":["metrics.k8s.io/v1beta1"],"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0"}]}`))
+ return
+ }
+ // Request Performed by DiscoveryClient to Kube API (Get API Groups)
+ if req.URL.Path == "/apis" {
+ _, _ = w.Write([]byte(`{"kind":"APIGroupList","apiVersion":"v1","groups":[]}`))
+ return
+ }
+ // Request Performed by DiscoveryClient to Kube API (Get API Resources)
+ if req.URL.Path == "/apis/metrics.k8s.io/v1beta1" {
+ _, _ = w.Write([]byte(`{"kind":"APIResourceList","apiVersion":"v1","groupVersion":"metrics.k8s.io/v1beta1","resources":[{"name":"pods","singularName":"","namespaced":true,"kind":"PodMetrics","verbs":["get","list"]}]}`))
+ return
+ }
+ }))
+ s.InitMcpClient()
+
+ s.Run("pods_top (denied)", func() {
+ result, err := s.CallTool("pods_top", map[string]interface{}{})
+ s.Require().NotNil(result, "result should not be nil")
+ s.Run("has error", func() {
+ s.Truef(result.IsError, "call tool should fail")
+ s.Nilf(err, "call tool should not return error object")
})
- t.Run("pods_run describes denial", func(t *testing.T) {
+ s.Run("describes denial", func() {
expectedMessage := "failed to get pods top: resource not allowed: metrics.k8s.io/v1beta1, Kind=PodMetrics"
- if podsTop.Content[0].(mcp.TextContent).Text != expectedMessage {
- t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, podsTop.Content[0].(mcp.TextContent).Text)
- }
+ s.Equalf(expectedMessage, result.Content[0].(mcp.TextContent).Text,
+ "expected descriptive error '%s', got %v", expectedMessage, result.Content[0].(mcp.TextContent).Text)
})
})
}
+
+func TestPodsTop(t *testing.T) {
+ suite.Run(t, new(PodsTopSuite))
+}
diff --git a/pkg/mcp/resources_test.go b/pkg/mcp/resources_test.go
index 3aa7b875..21329d20 100644
--- a/pkg/mcp/resources_test.go
+++ b/pkg/mcp/resources_test.go
@@ -5,220 +5,168 @@ import (
"strings"
"testing"
+ "github.com/BurntSushi/toml"
"github.com/mark3labs/mcp-go/mcp"
+ "github.com/stretchr/testify/suite"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/rbac/v1"
+ apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
+ "k8s.io/client-go/kubernetes"
"sigs.k8s.io/yaml"
-
- "github.com/containers/kubernetes-mcp-server/internal/test"
- "github.com/containers/kubernetes-mcp-server/pkg/config"
- "github.com/containers/kubernetes-mcp-server/pkg/output"
)
-func TestResourcesList(t *testing.T) {
- testCase(t, func(c *mcpContext) {
- c.withEnvTest()
- t.Run("resources_list with missing apiVersion returns error", func(t *testing.T) {
- toolResult, _ := c.callTool("resources_list", map[string]interface{}{})
- if !toolResult.IsError {
- t.Fatalf("call tool should fail")
- }
- if toolResult.Content[0].(mcp.TextContent).Text != "failed to list resources, missing argument apiVersion" {
- t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
- }
- })
- t.Run("resources_list with missing kind returns error", func(t *testing.T) {
- toolResult, _ := c.callTool("resources_list", map[string]interface{}{"apiVersion": "v1"})
- if !toolResult.IsError {
- t.Fatalf("call tool should fail")
- }
- if toolResult.Content[0].(mcp.TextContent).Text != "failed to list resources, missing argument kind" {
- t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
- }
- })
- t.Run("resources_list with invalid apiVersion returns error", func(t *testing.T) {
- toolResult, _ := c.callTool("resources_list", map[string]interface{}{"apiVersion": "invalid/api/version", "kind": "Pod"})
- if !toolResult.IsError {
- t.Fatalf("call tool should fail")
- }
- if toolResult.Content[0].(mcp.TextContent).Text != "failed to list resources, invalid argument apiVersion" {
- t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
- }
- })
- t.Run("resources_list with nonexistent apiVersion returns error", func(t *testing.T) {
- toolResult, _ := c.callTool("resources_list", map[string]interface{}{"apiVersion": "custom.non.existent.example.com/v1", "kind": "Custom"})
- if !toolResult.IsError {
- t.Fatalf("call tool should fail")
- }
- if toolResult.Content[0].(mcp.TextContent).Text != `failed to list resources: no matches for kind "Custom" in version "custom.non.existent.example.com/v1"` {
- t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
- }
- })
- namespaces, err := c.callTool("resources_list", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace"})
- t.Run("resources_list returns namespaces", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- return
- }
- if namespaces.IsError {
- t.Fatalf("call tool failed")
- return
- }
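+// ResourcesSuite groups the resources_* tool tests on top of BaseMcpSuite.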
+type ResourcesSuite struct {
+ BaseMcpSuite
+}
+
+func (s *ResourcesSuite) TestResourcesList() {
+ s.InitMcpClient()
+ s.Run("resources_list with missing apiVersion returns error", func() {
+ toolResult, _ := s.CallTool("resources_list", map[string]interface{}{})
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Equalf("failed to list resources, missing argument apiVersion", toolResult.Content[0].(mcp.TextContent).Text,
+ "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ s.Run("resources_list with missing kind returns error", func() {
+ toolResult, _ := s.CallTool("resources_list", map[string]interface{}{"apiVersion": "v1"})
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Equalf("failed to list resources, missing argument kind", toolResult.Content[0].(mcp.TextContent).Text,
+ "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ s.Run("resources_list with invalid apiVersion returns error", func() {
+ toolResult, _ := s.CallTool("resources_list", map[string]interface{}{"apiVersion": "invalid/api/version", "kind": "Pod"})
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Equalf("failed to list resources, invalid argument apiVersion", toolResult.Content[0].(mcp.TextContent).Text,
+ "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ s.Run("resources_list with nonexistent apiVersion returns error", func() {
+ toolResult, _ := s.CallTool("resources_list", map[string]interface{}{"apiVersion": "custom.non.existent.example.com/v1", "kind": "Custom"})
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Equalf(`failed to list resources: no matches for kind "Custom" in version "custom.non.existent.example.com/v1"`,
+ toolResult.Content[0].(mcp.TextContent).Text, "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ s.Run("resources_list(apiVersion=v1, kind=Namespace) returns namespaces", func() {
+ namespaces, err := s.CallTool("resources_list", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace"})
+ s.Run("no error", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(namespaces.IsError, "call tool failed")
})
var decodedNamespaces []unstructured.Unstructured
err = yaml.Unmarshal([]byte(namespaces.Content[0].(mcp.TextContent).Text), &decodedNamespaces)
- t.Run("resources_list has yaml content", func(t *testing.T) {
- if err != nil {
- t.Fatalf("invalid tool result content %v", err)
- }
+ s.Run("has yaml content", func() {
+ s.Nilf(err, "invalid tool result content %v", err)
})
- t.Run("resources_list returns more than 2 items", func(t *testing.T) {
- if len(decodedNamespaces) < 3 {
- t.Fatalf("invalid namespace count, expected >2, got %v", len(decodedNamespaces))
- }
+ s.Run("returns more than 2 items", func() {
+ s.Truef(len(decodedNamespaces) >= 3, "invalid namespace count, expected >2, got %v", len(decodedNamespaces))
})
-
- // Test label selector functionality
- t.Run("resources_list with label selector returns filtered pods", func(t *testing.T) {
-
- // List pods with label selector
- result, err := c.callTool("resources_list", map[string]interface{}{
+ })
+ s.Run("resources_list with label selector returns filtered pods", func() {
+ s.Run("list pods with app=nginx label", func() {
+ result, err := s.CallTool("resources_list", map[string]interface{}{
"apiVersion": "v1",
"kind": "Pod",
"namespace": "default",
"labelSelector": "app=nginx",
})
-
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- return
- }
- if result.IsError {
- t.Fatalf("call tool failed")
- return
- }
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(result.IsError, "call tool failed")
var decodedPods []unstructured.Unstructured
err = yaml.Unmarshal([]byte(result.Content[0].(mcp.TextContent).Text), &decodedPods)
- if err != nil {
- t.Fatalf("invalid tool result content %v", err)
- return
- }
-
- // Verify only the pod with matching label is returned
- if len(decodedPods) != 1 {
- t.Fatalf("expected 1 pod, got %d", len(decodedPods))
- return
- }
+ s.Nilf(err, "invalid tool result content %v", err)
- if decodedPods[0].GetName() != "a-pod-in-default" {
- t.Fatalf("expected pod-with-label, got %s", decodedPods[0].GetName())
- return
- }
-
- // Test that multiple label selectors work
- result, err = c.callTool("resources_list", map[string]interface{}{
+ s.Lenf(decodedPods, 1, "expected 1 pod, got %d", len(decodedPods))
+ s.Equalf("a-pod-in-default", decodedPods[0].GetName(), "expected a-pod-in-default, got %s", decodedPods[0].GetName())
+ })
+ s.Run("list pods with multiple label selectors", func() {
+ result, err := s.CallTool("resources_list", map[string]interface{}{
"apiVersion": "v1",
"kind": "Pod",
"namespace": "default",
"labelSelector": "test-label=test-value,another=value",
})
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(result.IsError, "call tool failed")
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- return
- }
- if result.IsError {
- t.Fatalf("call tool failed")
- return
- }
-
+ var decodedPods []unstructured.Unstructured
err = yaml.Unmarshal([]byte(result.Content[0].(mcp.TextContent).Text), &decodedPods)
- if err != nil {
- t.Fatalf("invalid tool result content %v", err)
- return
- }
+ s.Nilf(err, "invalid tool result content %v", err)
- // Verify no pods match multiple label selector
- if len(decodedPods) != 0 {
- t.Fatalf("expected 0 pods, got %d", len(decodedPods))
- return
- }
+ s.Lenf(decodedPods, 0, "expected 0 pods, got %d", len(decodedPods))
})
})
}
-func TestResourcesListDenied(t *testing.T) {
- deniedResourcesServer := test.Must(config.ReadToml([]byte(`
+func (s *ResourcesSuite) TestResourcesListDenied() {
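+ // Deny Secrets by kind and the whole rbac.authorization.k8s.io/v1 group before the MCP client starts.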
+ s.Require().NoError(toml.Unmarshal([]byte(`
denied_resources = [
{ version = "v1", kind = "Secret" },
{ group = "rbac.authorization.k8s.io", version = "v1" }
]
- `)))
- testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) {
- c.withEnvTest()
- deniedByKind, _ := c.callTool("resources_list", map[string]interface{}{"apiVersion": "v1", "kind": "Secret"})
- t.Run("resources_list (denied by kind) has error", func(t *testing.T) {
- if !deniedByKind.IsError {
- t.Fatalf("call tool should fail")
- }
- })
- t.Run("resources_list (denied by kind) describes denial", func(t *testing.T) {
+ `), s.Cfg), "Expected to parse denied resources config")
+ s.InitMcpClient()
+ s.Run("resources_list (denied by kind)", func() {
+ deniedByKind, err := s.CallTool("resources_list", map[string]interface{}{"apiVersion": "v1", "kind": "Secret"})
+ s.Run("has error", func() {
+ s.Truef(deniedByKind.IsError, "call tool should fail")
+ s.Nilf(err, "call tool should not return error object")
+ })
+ s.Run("describes denial", func() {
expectedMessage := "failed to list resources: resource not allowed: /v1, Kind=Secret"
- if deniedByKind.Content[0].(mcp.TextContent).Text != expectedMessage {
- t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text)
- }
+ s.Equalf(expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text,
+ "expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text)
})
- deniedByGroup, _ := c.callTool("resources_list", map[string]interface{}{"apiVersion": "rbac.authorization.k8s.io/v1", "kind": "Role"})
- t.Run("resources_list (denied by group) has error", func(t *testing.T) {
- if !deniedByGroup.IsError {
- t.Fatalf("call tool should fail")
- }
+ })
+ s.Run("resources_list (denied by group)", func() {
+ deniedByGroup, err := s.CallTool("resources_list", map[string]interface{}{"apiVersion": "rbac.authorization.k8s.io/v1", "kind": "Role"})
+ s.Run("has error", func() {
+ s.Truef(deniedByGroup.IsError, "call tool should fail")
+ s.Nilf(err, "call tool should not return error object")
})
- t.Run("resources_list (denied by group) describes denial", func(t *testing.T) {
+ s.Run("describes denial", func() {
expectedMessage := "failed to list resources: resource not allowed: rbac.authorization.k8s.io/v1, Kind=Role"
- if deniedByGroup.Content[0].(mcp.TextContent).Text != expectedMessage {
- t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text)
- }
- })
- allowedResource, _ := c.callTool("resources_list", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace"})
- t.Run("resources_list (not denied) returns list", func(t *testing.T) {
- if allowedResource.IsError {
- t.Fatalf("call tool should not fail")
- }
+ s.Equalf(expectedMessage, deniedByGroup.Content[0].(mcp.TextContent).Text,
+ "expected descriptive error '%s', got %v", expectedMessage, deniedByGroup.Content[0].(mcp.TextContent).Text)
})
})
+ s.Run("resources_list (not denied) returns list", func() {
+ allowedResource, _ := s.CallTool("resources_list", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace"})
+ s.Falsef(allowedResource.IsError, "call tool should not fail")
+ })
}
-func TestResourcesListAsTable(t *testing.T) {
- testCaseWithContext(t, &mcpContext{listOutput: output.Table, before: inOpenShift, after: inOpenShiftClear}, func(c *mcpContext) {
- c.withEnvTest()
- kc := c.newKubernetesClient()
- _, _ = kc.CoreV1().ConfigMaps("default").Create(t.Context(), &corev1.ConfigMap{
+func (s *ResourcesSuite) TestResourcesListAsTable() {
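+ // Render lists as kubectl-style tables and make the envtest cluster look like OpenShift so Route resources resolve.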
+ s.Cfg.ListOutput = "table"
+ s.Require().NoError(EnvTestInOpenShift(s.T().Context()), "Expected to configure test for OpenShift")
+ s.T().Cleanup(func() {
+ s.Require().NoError(EnvTestInOpenShiftClear(s.T().Context()), "Expected to clear OpenShift test configuration")
+ })
+ s.InitMcpClient()
+
+ s.Run("resources_list(apiVersion=v1, kind=ConfigMap) (list_output=table)", func() {
+ kc := kubernetes.NewForConfigOrDie(envTestRestConfig)
+ _, _ = kc.CoreV1().ConfigMaps("default").Create(s.T().Context(), &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: "a-configmap-to-list-as-table", Labels: map[string]string{"resource": "config-map"}},
Data: map[string]string{"key": "value"},
}, metav1.CreateOptions{})
- configMapList, err := c.callTool("resources_list", map[string]interface{}{"apiVersion": "v1", "kind": "ConfigMap"})
- t.Run("resources_list returns ConfigMap list", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- }
- if configMapList.IsError {
- t.Fatalf("call tool failed")
- }
+ configMapList, err := s.CallTool("resources_list", map[string]interface{}{"apiVersion": "v1", "kind": "ConfigMap"})
+ s.Run("no error", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(configMapList.IsError, "call tool failed")
})
+ s.Require().NotNil(configMapList, "Expected tool result from call")
outConfigMapList := configMapList.Content[0].(mcp.TextContent).Text
- t.Run("resources_list returns column headers for ConfigMap list", func(t *testing.T) {
+ s.Run("returns column headers for ConfigMap list", func() {
expectedHeaders := "NAMESPACE\\s+APIVERSION\\s+KIND\\s+NAME\\s+DATA\\s+AGE\\s+LABELS"
- if m, e := regexp.MatchString(expectedHeaders, outConfigMapList); !m || e != nil {
- t.Fatalf("Expected headers '%s' not found in output:\n%s", expectedHeaders, outConfigMapList)
- }
+ m, e := regexp.MatchString(expectedHeaders, outConfigMapList)
+ s.Truef(m, "Expected headers '%s' not found in output:\n%s", expectedHeaders, outConfigMapList)
+ s.NoErrorf(e, "Error matching headers regex: %v", e)
})
- t.Run("resources_list returns formatted row for a-configmap-to-list-as-table", func(t *testing.T) {
+ s.Run("returns formatted row for a-configmap-to-list-as-table", func() {
expectedRow := "(?default)\\s+" +
"(?v1)\\s+" +
"(?ConfigMap)\\s+" +
@@ -226,274 +174,200 @@ func TestResourcesListAsTable(t *testing.T) {
"(?1)\\s+" +
"(?(\\d+m)?(\\d+s)?)\\s+" +
"(?resource=config-map)"
- if m, e := regexp.MatchString(expectedRow, outConfigMapList); !m || e != nil {
- t.Fatalf("Expected row '%s' not found in output:\n%s", expectedRow, outConfigMapList)
- }
+ m, e := regexp.MatchString(expectedRow, outConfigMapList)
+ s.Truef(m, "Expected row '%s' not found in output:\n%s", expectedRow, outConfigMapList)
+ s.NoErrorf(e, "Error matching row regex: %v", e)
})
- // Custom Resource List
+ })
+
+ s.Run("resources_list(apiVersion=route.openshift.io/v1, kind=Route) (list_output=table)", func() {
_, _ = dynamic.NewForConfigOrDie(envTestRestConfig).
Resource(schema.GroupVersionResource{Group: "route.openshift.io", Version: "v1", Resource: "routes"}).
Namespace("default").
- Create(c.ctx, &unstructured.Unstructured{Object: map[string]interface{}{
+ Create(s.T().Context(), &unstructured.Unstructured{Object: map[string]interface{}{
"apiVersion": "route.openshift.io/v1",
"kind": "Route",
"metadata": map[string]interface{}{
"name": "an-openshift-route-to-list-as-table",
},
}}, metav1.CreateOptions{})
- routeList, err := c.callTool("resources_list", map[string]interface{}{"apiVersion": "route.openshift.io/v1", "kind": "Route"})
- t.Run("resources_list returns Route list", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- }
- if routeList.IsError {
- t.Fatalf("call tool failed")
- }
+ routeList, err := s.CallTool("resources_list", map[string]interface{}{"apiVersion": "route.openshift.io/v1", "kind": "Route"})
+ s.Run("no error", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(routeList.IsError, "call tool failed")
})
+ s.Require().NotNil(routeList, "Expected tool result from call")
outRouteList := routeList.Content[0].(mcp.TextContent).Text
- t.Run("resources_list returns column headers for Route list", func(t *testing.T) {
+ s.Run("returns column headers for Route list", func() {
expectedHeaders := "NAMESPACE\\s+APIVERSION\\s+KIND\\s+NAME\\s+AGE\\s+LABELS"
- if m, e := regexp.MatchString(expectedHeaders, outRouteList); !m || e != nil {
- t.Fatalf("Expected headers '%s' not found in output:\n%s", expectedHeaders, outRouteList)
- }
+ m, e := regexp.MatchString(expectedHeaders, outRouteList)
+ s.Truef(m, "Expected headers '%s' not found in output:\n%s", expectedHeaders, outRouteList)
+ s.NoErrorf(e, "Error matching headers regex: %v", e)
})
- t.Run("resources_list returns formatted row for an-openshift-route-to-list-as-table", func(t *testing.T) {
+ s.Run("returns formatted row for an-openshift-route-to-list-as-table", func() {
expectedRow := "(?default)\\s+" +
"(?route.openshift.io/v1)\\s+" +
"(?Route)\\s+" +
"(?an-openshift-route-to-list-as-table)\\s+" +
"(?(\\d+m)?(\\d+s)?)\\s+" +
"(?)"
- if m, e := regexp.MatchString(expectedRow, outRouteList); !m || e != nil {
- t.Fatalf("Expected row '%s' not found in output:\n%s", expectedRow, outRouteList)
- }
+ m, e := regexp.MatchString(expectedRow, outRouteList)
+ s.Truef(m, "Expected row '%s' not found in output:\n%s", expectedRow, outRouteList)
+ s.NoErrorf(e, "Error matching row regex: %v", e)
})
})
}
-func TestResourcesGet(t *testing.T) {
- testCase(t, func(c *mcpContext) {
- c.withEnvTest()
- t.Run("resources_get with missing apiVersion returns error", func(t *testing.T) {
- toolResult, _ := c.callTool("resources_get", map[string]interface{}{})
- if !toolResult.IsError {
- t.Fatalf("call tool should fail")
- return
- }
- if toolResult.Content[0].(mcp.TextContent).Text != "failed to get resource, missing argument apiVersion" {
- t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
- return
- }
- })
- t.Run("resources_get with missing kind returns error", func(t *testing.T) {
- toolResult, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": "v1"})
- if !toolResult.IsError {
- t.Fatalf("call tool should fail")
- return
- }
- if toolResult.Content[0].(mcp.TextContent).Text != "failed to get resource, missing argument kind" {
- t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
- return
- }
- })
- t.Run("resources_get with invalid apiVersion returns error", func(t *testing.T) {
- toolResult, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": "invalid/api/version", "kind": "Pod", "name": "a-pod"})
- if !toolResult.IsError {
- t.Fatalf("call tool should fail")
- return
- }
- if toolResult.Content[0].(mcp.TextContent).Text != "failed to get resource, invalid argument apiVersion" {
- t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
- return
- }
- })
- t.Run("resources_get with nonexistent apiVersion returns error", func(t *testing.T) {
- toolResult, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": "custom.non.existent.example.com/v1", "kind": "Custom", "name": "a-custom"})
- if !toolResult.IsError {
- t.Fatalf("call tool should fail")
- return
- }
- if toolResult.Content[0].(mcp.TextContent).Text != `failed to get resource: no matches for kind "Custom" in version "custom.non.existent.example.com/v1"` {
- t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
- return
- }
- })
- t.Run("resources_get with missing name returns error", func(t *testing.T) {
- toolResult, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace"})
- if !toolResult.IsError {
- t.Fatalf("call tool should fail")
- return
- }
- if toolResult.Content[0].(mcp.TextContent).Text != "failed to get resource, missing argument name" {
- t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
- return
- }
- })
- namespace, err := c.callTool("resources_get", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace", "name": "default"})
- t.Run("resources_get returns namespace", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- return
- }
- if namespace.IsError {
- t.Fatalf("call tool failed")
- return
- }
+func (s *ResourcesSuite) TestResourcesGet() {
+ s.InitMcpClient()
+ s.Run("resources_get with missing apiVersion returns error", func() {
+ toolResult, _ := s.CallTool("resources_get", map[string]interface{}{})
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Equalf("failed to get resource, missing argument apiVersion", toolResult.Content[0].(mcp.TextContent).Text,
+ "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ s.Run("resources_get with missing kind returns error", func() {
+ toolResult, _ := s.CallTool("resources_get", map[string]interface{}{"apiVersion": "v1"})
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Equalf("failed to get resource, missing argument kind", toolResult.Content[0].(mcp.TextContent).Text,
+ "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ s.Run("resources_get with invalid apiVersion returns error", func() {
+ toolResult, _ := s.CallTool("resources_get", map[string]interface{}{"apiVersion": "invalid/api/version", "kind": "Pod", "name": "a-pod"})
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Equalf("failed to get resource, invalid argument apiVersion", toolResult.Content[0].(mcp.TextContent).Text,
+ "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ s.Run("resources_get with nonexistent apiVersion returns error", func() {
+ toolResult, _ := s.CallTool("resources_get", map[string]interface{}{"apiVersion": "custom.non.existent.example.com/v1", "kind": "Custom", "name": "a-custom"})
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Equalf(`failed to get resource: no matches for kind "Custom" in version "custom.non.existent.example.com/v1"`,
+ toolResult.Content[0].(mcp.TextContent).Text, "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ s.Run("resources_get with missing name returns error", func() {
+ toolResult, _ := s.CallTool("resources_get", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace"})
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Equalf("failed to get resource, missing argument name", toolResult.Content[0].(mcp.TextContent).Text,
+ "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ s.Run("resources_get returns namespace", func() {
+ namespace, err := s.CallTool("resources_get", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace", "name": "default"})
+ s.Run("no error", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(namespace.IsError, "call tool failed")
})
var decodedNamespace unstructured.Unstructured
err = yaml.Unmarshal([]byte(namespace.Content[0].(mcp.TextContent).Text), &decodedNamespace)
- t.Run("resources_get has yaml content", func(t *testing.T) {
- if err != nil {
- t.Fatalf("invalid tool result content %v", err)
- return
- }
+ s.Run("has yaml content", func() {
+ s.Nilf(err, "invalid tool result content %v", err)
})
- t.Run("resources_get returns default namespace", func(t *testing.T) {
- if decodedNamespace.GetName() != "default" {
- t.Fatalf("invalid namespace name, expected default, got %v", decodedNamespace.GetName())
- return
- }
+ s.Run("returns default namespace", func() {
+ s.Equalf("default", decodedNamespace.GetName(), "invalid namespace name, expected default, got %v", decodedNamespace.GetName())
})
})
}
-func TestResourcesGetDenied(t *testing.T) {
- deniedResourcesServer := test.Must(config.ReadToml([]byte(`
+func (s *ResourcesSuite) TestResourcesGetDenied() {
+ s.Require().NoError(toml.Unmarshal([]byte(`
denied_resources = [
{ version = "v1", kind = "Secret" },
{ group = "rbac.authorization.k8s.io", version = "v1" }
]
- `)))
- testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) {
- c.withEnvTest()
- kc := c.newKubernetesClient()
- _, _ = kc.CoreV1().Secrets("default").Create(c.ctx, &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{Name: "denied-secret"},
- }, metav1.CreateOptions{})
- _, _ = kc.RbacV1().Roles("default").Create(c.ctx, &v1.Role{
- ObjectMeta: metav1.ObjectMeta{Name: "denied-role"},
- }, metav1.CreateOptions{})
- deniedByKind, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": "v1", "kind": "Secret", "namespace": "default", "name": "denied-secret"})
- t.Run("resources_get (denied by kind) has error", func(t *testing.T) {
- if !deniedByKind.IsError {
- t.Fatalf("call tool should fail")
- }
- })
- t.Run("resources_get (denied by kind) describes denial", func(t *testing.T) {
+ `), s.Cfg), "Expected to parse denied resources config")
+ s.InitMcpClient()
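+ // Seed a Secret and a Role that the denied-resource checks will try to read.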
+ kc := kubernetes.NewForConfigOrDie(envTestRestConfig)
+ _, _ = kc.CoreV1().Secrets("default").Create(s.T().Context(), &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{Name: "denied-secret"},
+ }, metav1.CreateOptions{})
+ _, _ = kc.RbacV1().Roles("default").Create(s.T().Context(), &v1.Role{
+ ObjectMeta: metav1.ObjectMeta{Name: "denied-role"},
+ }, metav1.CreateOptions{})
+ s.Run("resources_get (denied by kind)", func() {
+ deniedByKind, err := s.CallTool("resources_get", map[string]interface{}{"apiVersion": "v1", "kind": "Secret", "namespace": "default", "name": "denied-secret"})
+ s.Run("has error", func() {
+ s.Truef(deniedByKind.IsError, "call tool should fail")
+ s.Nilf(err, "call tool should not return error object")
+ })
+ s.Run("describes denial", func() {
expectedMessage := "failed to get resource: resource not allowed: /v1, Kind=Secret"
- if deniedByKind.Content[0].(mcp.TextContent).Text != expectedMessage {
- t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text)
- }
+ s.Equalf(expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text,
+ "expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text)
})
- deniedByGroup, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": "rbac.authorization.k8s.io/v1", "kind": "Role", "namespace": "default", "name": "denied-role"})
- t.Run("resources_get (denied by group) has error", func(t *testing.T) {
- if !deniedByGroup.IsError {
- t.Fatalf("call tool should fail")
- }
+ })
+ s.Run("resources_get (denied by group)", func() {
+ deniedByGroup, err := s.CallTool("resources_get", map[string]interface{}{"apiVersion": "rbac.authorization.k8s.io/v1", "kind": "Role", "namespace": "default", "name": "denied-role"})
+ s.Run("has error", func() {
+ s.Truef(deniedByGroup.IsError, "call tool should fail")
+ s.Nilf(err, "call tool should not return error object")
})
- t.Run("resources_get (denied by group) describes denial", func(t *testing.T) {
+ s.Run("describes denial", func() {
expectedMessage := "failed to get resource: resource not allowed: rbac.authorization.k8s.io/v1, Kind=Role"
- if deniedByGroup.Content[0].(mcp.TextContent).Text != expectedMessage {
- t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text)
- }
- })
- allowedResource, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace", "name": "default"})
- t.Run("resources_get (not denied) returns resource", func(t *testing.T) {
- if allowedResource.IsError {
- t.Fatalf("call tool should not fail")
- }
+ s.Equalf(expectedMessage, deniedByGroup.Content[0].(mcp.TextContent).Text,
+ "expected descriptive error '%s', got %v", expectedMessage, deniedByGroup.Content[0].(mcp.TextContent).Text)
})
})
+ s.Run("resources_get (not denied) returns resource", func() {
+ allowedResource, err := s.CallTool("resources_get", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace", "name": "default"})
+ s.Falsef(allowedResource.IsError, "call tool should not fail")
+ s.Nilf(err, "call tool should not return error object")
+ })
}
-func TestResourcesCreateOrUpdate(t *testing.T) {
- testCase(t, func(c *mcpContext) {
- c.withEnvTest()
- t.Run("resources_create_or_update with nil resource returns error", func(t *testing.T) {
- toolResult, _ := c.callTool("resources_create_or_update", map[string]interface{}{})
- if toolResult.IsError != true {
- t.Fatalf("call tool should fail")
- return
- }
- if toolResult.Content[0].(mcp.TextContent).Text != "failed to create or update resources, missing argument resource" {
- t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
- return
- }
- })
- t.Run("resources_create_or_update with empty resource returns error", func(t *testing.T) {
- toolResult, _ := c.callTool("resources_create_or_update", map[string]interface{}{"resource": ""})
- if toolResult.IsError != true {
- t.Fatalf("call tool should fail")
- return
- }
- if toolResult.Content[0].(mcp.TextContent).Text != "failed to create or update resources, missing argument resource" {
- t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
- return
- }
- })
- client := c.newKubernetesClient()
+func (s *ResourcesSuite) TestResourcesCreateOrUpdate() {
+ s.InitMcpClient()
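+ // Direct Kubernetes client used to verify the tool calls' side effects on the cluster.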
+ client := kubernetes.NewForConfigOrDie(envTestRestConfig)
+
+ s.Run("resources_create_or_update with nil resource returns error", func() {
+ toolResult, _ := s.CallTool("resources_create_or_update", map[string]interface{}{})
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Equalf("failed to create or update resources, missing argument resource", toolResult.Content[0].(mcp.TextContent).Text,
+ "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ s.Run("resources_create_or_update with empty resource returns error", func() {
+ toolResult, _ := s.CallTool("resources_create_or_update", map[string]interface{}{"resource": ""})
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Equalf("failed to create or update resources, missing argument resource", toolResult.Content[0].(mcp.TextContent).Text,
+ "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
+ })
+
+ s.Run("resources_create_or_update with valid namespaced yaml resource", func() {
configMapYaml := "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: a-cm-created-or-updated\n namespace: default\n"
- resourcesCreateOrUpdateCm1, err := c.callTool("resources_create_or_update", map[string]interface{}{"resource": configMapYaml})
- t.Run("resources_create_or_update with valid namespaced yaml resource returns success", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- return
- }
- if resourcesCreateOrUpdateCm1.IsError {
- t.Errorf("call tool failed")
- return
- }
+ resourcesCreateOrUpdateCm1, err := s.CallTool("resources_create_or_update", map[string]interface{}{"resource": configMapYaml})
+ s.Run("returns success", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(resourcesCreateOrUpdateCm1.IsError, "call tool failed")
})
var decodedCreateOrUpdateCm1 []unstructured.Unstructured
err = yaml.Unmarshal([]byte(resourcesCreateOrUpdateCm1.Content[0].(mcp.TextContent).Text), &decodedCreateOrUpdateCm1)
- t.Run("resources_create_or_update with valid namespaced yaml resource returns yaml content", func(t *testing.T) {
- if err != nil {
- t.Errorf("invalid tool result content %v", err)
- return
- }
- if !strings.HasPrefix(resourcesCreateOrUpdateCm1.Content[0].(mcp.TextContent).Text, "# The following resources (YAML) have been created or updated successfully") {
- t.Errorf("Excpected success message, got %v", resourcesCreateOrUpdateCm1.Content[0].(mcp.TextContent).Text)
- return
- }
- if len(decodedCreateOrUpdateCm1) != 1 {
- t.Errorf("invalid resource count, expected 1, got %v", len(decodedCreateOrUpdateCm1))
- return
- }
- if decodedCreateOrUpdateCm1[0].GetName() != "a-cm-created-or-updated" {
- t.Errorf("invalid resource name, expected a-cm-created-or-updated, got %v", decodedCreateOrUpdateCm1[0].GetName())
- return
- }
- if decodedCreateOrUpdateCm1[0].GetUID() == "" {
- t.Errorf("invalid uid, got %v", decodedCreateOrUpdateCm1[0].GetUID())
- return
- }
- })
- t.Run("resources_create_or_update with valid namespaced yaml resource creates ConfigMap", func(t *testing.T) {
- cm, _ := client.CoreV1().ConfigMaps("default").Get(c.ctx, "a-cm-created-or-updated", metav1.GetOptions{})
- if cm == nil {
- t.Fatalf("ConfigMap not found")
- return
- }
+ s.Run("returns yaml content", func() {
+ s.Nilf(err, "invalid tool result content %v", err)
+ s.Truef(strings.HasPrefix(resourcesCreateOrUpdateCm1.Content[0].(mcp.TextContent).Text, "# The following resources (YAML) have been created or updated successfully"),
+ "Expected success message, got %v", resourcesCreateOrUpdateCm1.Content[0].(mcp.TextContent).Text)
+ s.Lenf(decodedCreateOrUpdateCm1, 1, "invalid resource count, expected 1, got %v", len(decodedCreateOrUpdateCm1))
+ s.Equalf("a-cm-created-or-updated", decodedCreateOrUpdateCm1[0].GetName(),
+ "invalid resource name, expected a-cm-created-or-updated, got %v", decodedCreateOrUpdateCm1[0].GetName())
+ s.NotEmptyf(decodedCreateOrUpdateCm1[0].GetUID(), "invalid uid, got %v", decodedCreateOrUpdateCm1[0].GetUID())
+ })
+ s.Run("creates ConfigMap", func() {
+ cm, _ := client.CoreV1().ConfigMaps("default").Get(s.T().Context(), "a-cm-created-or-updated", metav1.GetOptions{})
+ s.NotNil(cm, "ConfigMap not found")
})
+ })
+
+ s.Run("resources_create_or_update with valid namespaced json resource", func() {
configMapJson := "{\"apiVersion\": \"v1\", \"kind\": \"ConfigMap\", \"metadata\": {\"name\": \"a-cm-created-or-updated-2\", \"namespace\": \"default\"}}"
- resourcesCreateOrUpdateCm2, err := c.callTool("resources_create_or_update", map[string]interface{}{"resource": configMapJson})
- t.Run("resources_create_or_update with valid namespaced json resource returns success", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- return
- }
- if resourcesCreateOrUpdateCm2.IsError {
- t.Fatalf("call tool failed")
- return
- }
- })
- t.Run("resources_create_or_update with valid namespaced json resource creates config map", func(t *testing.T) {
- cm, _ := client.CoreV1().ConfigMaps("default").Get(c.ctx, "a-cm-created-or-updated-2", metav1.GetOptions{})
- if cm == nil {
- t.Fatalf("ConfigMap not found")
- return
- }
+ resourcesCreateOrUpdateCm2, err := s.CallTool("resources_create_or_update", map[string]interface{}{"resource": configMapJson})
+ s.Run("returns success", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(resourcesCreateOrUpdateCm2.IsError, "call tool failed")
+ })
+ s.Run("creates config map", func() {
+ cm, _ := client.CoreV1().ConfigMaps("default").Get(s.T().Context(), "a-cm-created-or-updated-2", metav1.GetOptions{})
+ s.NotNil(cm, "ConfigMap not found")
})
+ })
+
+ s.Run("resources_create_or_update with valid cluster-scoped json resource", func() {
customResourceDefinitionJson := `
{
"apiVersion": "apiextensions.k8s.io/v1",
@@ -509,284 +383,212 @@ func TestResourcesCreateOrUpdate(t *testing.T) {
"names": {"plural": "customs","singular": "custom","kind": "Custom"}
}
}`
- resourcesCreateOrUpdateCrd, err := c.callTool("resources_create_or_update", map[string]interface{}{"resource": customResourceDefinitionJson})
- t.Run("resources_create_or_update with valid cluster-scoped json resource returns success", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- return
- }
- if resourcesCreateOrUpdateCrd.IsError {
- t.Fatalf("call tool failed")
- return
- }
- })
- t.Run("resources_create_or_update with valid cluster-scoped json resource creates custom resource definition", func(t *testing.T) {
- apiExtensionsV1Client := c.newApiExtensionsClient()
- _, err = apiExtensionsV1Client.CustomResourceDefinitions().Get(c.ctx, "customs.example.com", metav1.GetOptions{})
- if err != nil {
- t.Fatalf("custom resource definition not found")
- return
- }
- })
- c.crdWaitUntilReady("customs.example.com")
+ resourcesCreateOrUpdateCrd, err := s.CallTool("resources_create_or_update", map[string]interface{}{"resource": customResourceDefinitionJson})
+ s.Run("returns success", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(resourcesCreateOrUpdateCrd.IsError, "call tool failed")
+ })
+ s.Run("creates custom resource definition", func() {
+ apiExtensionsV1Client := apiextensionsv1.NewForConfigOrDie(envTestRestConfig)
+ _, err = apiExtensionsV1Client.CustomResourceDefinitions().Get(s.T().Context(), "customs.example.com", metav1.GetOptions{})
+ s.Nilf(err, "custom resource definition not found")
+ })
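+ // Wait until the CRD is Established; creating a Custom resource before that point would fail to resolve its kind.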
+ s.Require().NoError(EnvTestCrdWaitUntilReady(s.T().Context(), "customs.example.com"))
+ })
+
+ s.Run("resources_create_or_update creates custom resource", func() {
customJson := "{\"apiVersion\": \"example.com/v1\", \"kind\": \"Custom\", \"metadata\": {\"name\": \"a-custom-resource\"}}"
- resourcesCreateOrUpdateCustom, err := c.callTool("resources_create_or_update", map[string]interface{}{"resource": customJson})
- t.Run("resources_create_or_update with valid namespaced json resource returns success", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- return
- }
- if resourcesCreateOrUpdateCustom.IsError {
- t.Fatalf("call tool failed, got: %v", resourcesCreateOrUpdateCustom.Content)
- return
- }
- })
- t.Run("resources_create_or_update with valid namespaced json resource creates custom resource", func(t *testing.T) {
+ resourcesCreateOrUpdateCustom, err := s.CallTool("resources_create_or_update", map[string]interface{}{"resource": customJson})
+ s.Run("returns success", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(resourcesCreateOrUpdateCustom.IsError, "call tool failed, got: %v", resourcesCreateOrUpdateCustom.Content)
+ })
+ s.Run("creates custom resource", func() {
dynamicClient := dynamic.NewForConfigOrDie(envTestRestConfig)
_, err = dynamicClient.
Resource(schema.GroupVersionResource{Group: "example.com", Version: "v1", Resource: "customs"}).
Namespace("default").
- Get(c.ctx, "a-custom-resource", metav1.GetOptions{})
- if err != nil {
- t.Fatalf("custom resource not found")
- return
- }
+ Get(s.T().Context(), "a-custom-resource", metav1.GetOptions{})
+ s.Nilf(err, "custom resource not found")
})
+ })
+
+ s.Run("resources_create_or_update with valid namespaced json resource", func() {
customJsonUpdated := "{\"apiVersion\": \"example.com/v1\", \"kind\": \"Custom\", \"metadata\": {\"name\": \"a-custom-resource\",\"annotations\": {\"updated\": \"true\"}}}"
- resourcesCreateOrUpdateCustomUpdated, err := c.callTool("resources_create_or_update", map[string]interface{}{"resource": customJsonUpdated})
- t.Run("resources_create_or_update with valid namespaced json resource updates custom resource", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- return
- }
- if resourcesCreateOrUpdateCustomUpdated.IsError {
- t.Fatalf("call tool failed")
- return
- }
- })
- t.Run("resources_create_or_update with valid namespaced json resource updates custom resource", func(t *testing.T) {
+ resourcesCreateOrUpdateCustomUpdated, err := s.CallTool("resources_create_or_update", map[string]interface{}{"resource": customJsonUpdated})
+ s.Run("returns success", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(resourcesCreateOrUpdateCustomUpdated.IsError, "call tool failed")
+ })
+ s.Run("updates custom resource", func() {
dynamicClient := dynamic.NewForConfigOrDie(envTestRestConfig)
customResource, _ := dynamicClient.
Resource(schema.GroupVersionResource{Group: "example.com", Version: "v1", Resource: "customs"}).
Namespace("default").
- Get(c.ctx, "a-custom-resource", metav1.GetOptions{})
- if customResource == nil {
- t.Fatalf("custom resource not found")
- return
- }
+ Get(s.T().Context(), "a-custom-resource", metav1.GetOptions{})
+ s.NotNil(customResource, "custom resource not found")
annotations := customResource.GetAnnotations()
- if annotations == nil || annotations["updated"] != "true" {
- t.Fatalf("custom resource not updated")
- return
- }
+ s.Require().NotNil(annotations, "annotations should not be nil")
+ s.Equalf("true", annotations["updated"], "custom resource not updated")
})
})
}
-func TestResourcesCreateOrUpdateDenied(t *testing.T) {
- deniedResourcesServer := test.Must(config.ReadToml([]byte(`
+func (s *ResourcesSuite) TestResourcesCreateOrUpdateDenied() {
+ s.Require().NoError(toml.Unmarshal([]byte(`
denied_resources = [
{ version = "v1", kind = "Secret" },
{ group = "rbac.authorization.k8s.io", version = "v1" }
]
- `)))
- testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) {
- c.withEnvTest()
+ `), s.Cfg), "Expected to parse denied resources config")
+ s.InitMcpClient()
+ s.Run("resources_create_or_update (denied by kind)", func() {
secretYaml := "apiVersion: v1\nkind: Secret\nmetadata:\n name: a-denied-secret\n namespace: default\n"
- deniedByKind, _ := c.callTool("resources_create_or_update", map[string]interface{}{"resource": secretYaml})
- t.Run("resources_create_or_update (denied by kind) has error", func(t *testing.T) {
- if !deniedByKind.IsError {
- t.Fatalf("call tool should fail")
- }
+ deniedByKind, err := s.CallTool("resources_create_or_update", map[string]interface{}{"resource": secretYaml})
+ s.Run("has error", func() {
+ s.Truef(deniedByKind.IsError, "call tool should fail")
+ s.Nilf(err, "call tool should not return error object")
})
- t.Run("resources_create_or_update (denied by kind) describes denial", func(t *testing.T) {
+ s.Run("describes denial", func() {
expectedMessage := "failed to create or update resources: resource not allowed: /v1, Kind=Secret"
- if deniedByKind.Content[0].(mcp.TextContent).Text != expectedMessage {
- t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text)
- }
+ s.Equalf(expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text,
+ "expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text)
})
+ })
+ s.Run("resources_create_or_update (denied by group)", func() {
roleYaml := "apiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n name: a-denied-role\n namespace: default\n"
- deniedByGroup, _ := c.callTool("resources_create_or_update", map[string]interface{}{"resource": roleYaml})
- t.Run("resources_create_or_update (denied by group) has error", func(t *testing.T) {
- if !deniedByGroup.IsError {
- t.Fatalf("call tool should fail")
- }
+ deniedByGroup, err := s.CallTool("resources_create_or_update", map[string]interface{}{"resource": roleYaml})
+ s.Run("has error", func() {
+ s.Truef(deniedByGroup.IsError, "call tool should fail")
+ s.Nilf(err, "call tool should not return error object")
})
- t.Run("resources_create_or_update (denied by group) describes denial", func(t *testing.T) {
+ s.Run("describes denial", func() {
expectedMessage := "failed to create or update resources: resource not allowed: rbac.authorization.k8s.io/v1, Kind=Role"
- if deniedByGroup.Content[0].(mcp.TextContent).Text != expectedMessage {
- t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text)
- }
+ s.Equalf(expectedMessage, deniedByGroup.Content[0].(mcp.TextContent).Text,
+ "expected descriptive error '%s', got %v", expectedMessage, deniedByGroup.Content[0].(mcp.TextContent).Text)
})
+ })
+ s.Run("resources_create_or_update (not denied) creates or updates resource", func() {
configMapYaml := "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: a-cm-created-or-updated\n namespace: default\n"
- allowedResource, _ := c.callTool("resources_create_or_update", map[string]interface{}{"resource": configMapYaml})
- t.Run("resources_create_or_update (not denied) creates or updates resource", func(t *testing.T) {
- if allowedResource.IsError {
- t.Fatalf("call tool should not fail")
- }
- })
+ allowedResource, err := s.CallTool("resources_create_or_update", map[string]interface{}{"resource": configMapYaml})
+ s.Falsef(allowedResource.IsError, "call tool should not fail")
+ s.Nilf(err, "call tool should not return error object")
})
}
-func TestResourcesDelete(t *testing.T) {
- testCase(t, func(c *mcpContext) {
- c.withEnvTest()
- t.Run("resources_delete with missing apiVersion returns error", func(t *testing.T) {
- toolResult, _ := c.callTool("resources_delete", map[string]interface{}{})
- if !toolResult.IsError {
- t.Fatalf("call tool should fail")
- return
- }
- if toolResult.Content[0].(mcp.TextContent).Text != "failed to delete resource, missing argument apiVersion" {
- t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
- return
- }
- })
- t.Run("resources_delete with missing kind returns error", func(t *testing.T) {
- toolResult, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1"})
- if !toolResult.IsError {
- t.Fatalf("call tool should fail")
- return
- }
- if toolResult.Content[0].(mcp.TextContent).Text != "failed to delete resource, missing argument kind" {
- t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
- return
- }
- })
- t.Run("resources_delete with invalid apiVersion returns error", func(t *testing.T) {
- toolResult, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "invalid/api/version", "kind": "Pod", "name": "a-pod"})
- if !toolResult.IsError {
- t.Fatalf("call tool should fail")
- return
- }
- if toolResult.Content[0].(mcp.TextContent).Text != "failed to delete resource, invalid argument apiVersion" {
- t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
- return
- }
- })
- t.Run("resources_delete with nonexistent apiVersion returns error", func(t *testing.T) {
- toolResult, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "custom.non.existent.example.com/v1", "kind": "Custom", "name": "a-custom"})
- if !toolResult.IsError {
- t.Fatalf("call tool should fail")
- return
- }
- if toolResult.Content[0].(mcp.TextContent).Text != `failed to delete resource: no matches for kind "Custom" in version "custom.non.existent.example.com/v1"` {
- t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
- return
- }
- })
- t.Run("resources_delete with missing name returns error", func(t *testing.T) {
- toolResult, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace"})
- if !toolResult.IsError {
- t.Fatalf("call tool should fail")
- return
- }
- if toolResult.Content[0].(mcp.TextContent).Text != "failed to delete resource, missing argument name" {
- t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
- return
- }
- })
- t.Run("resources_delete with nonexistent resource returns error", func(t *testing.T) {
- toolResult, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "ConfigMap", "name": "nonexistent-configmap"})
- if !toolResult.IsError {
- t.Fatalf("call tool should fail")
- return
- }
- if toolResult.Content[0].(mcp.TextContent).Text != `failed to delete resource: configmaps "nonexistent-configmap" not found` {
- t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
- return
- }
- })
- resourcesDeleteCm, err := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "ConfigMap", "name": "a-configmap-to-delete"})
- t.Run("resources_delete with valid namespaced resource returns success", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- return
- }
- if resourcesDeleteCm.IsError {
- t.Fatalf("call tool failed")
- return
- }
- if resourcesDeleteCm.Content[0].(mcp.TextContent).Text != "Resource deleted successfully" {
- t.Fatalf("invalid tool result content got: %v", resourcesDeleteCm.Content[0].(mcp.TextContent).Text)
- return
- }
- })
- client := c.newKubernetesClient()
- t.Run("resources_delete with valid namespaced resource deletes ConfigMap", func(t *testing.T) {
- _, err := client.CoreV1().ConfigMaps("default").Get(c.ctx, "a-configmap-to-delete", metav1.GetOptions{})
- if err == nil {
- t.Fatalf("ConfigMap not deleted")
- return
- }
- })
- resourcesDeleteNamespace, err := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace", "name": "ns-to-delete"})
- t.Run("resources_delete with valid namespaced resource returns success", func(t *testing.T) {
- if err != nil {
- t.Fatalf("call tool failed %v", err)
- return
- }
- if resourcesDeleteNamespace.IsError {
- t.Fatalf("call tool failed")
- return
- }
- if resourcesDeleteNamespace.Content[0].(mcp.TextContent).Text != "Resource deleted successfully" {
- t.Fatalf("invalid tool result content got: %v", resourcesDeleteNamespace.Content[0].(mcp.TextContent).Text)
- return
- }
- })
- t.Run("resources_delete with valid namespaced resource deletes Namespace", func(t *testing.T) {
- ns, err := client.CoreV1().Namespaces().Get(c.ctx, "ns-to-delete", metav1.GetOptions{})
- if err == nil && ns != nil && ns.DeletionTimestamp == nil {
- t.Fatalf("Namespace not deleted")
- return
- }
+func (s *ResourcesSuite) TestResourcesDelete() {
+ s.InitMcpClient()
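+ // Direct Kubernetes client used to check that deleted resources are really gone.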
+ client := kubernetes.NewForConfigOrDie(envTestRestConfig)
+
+ s.Run("resources_delete with missing apiVersion returns error", func() {
+ toolResult, _ := s.CallTool("resources_delete", map[string]interface{}{})
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Equalf("failed to delete resource, missing argument apiVersion", toolResult.Content[0].(mcp.TextContent).Text,
+ "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ s.Run("resources_delete with missing kind returns error", func() {
+ toolResult, _ := s.CallTool("resources_delete", map[string]interface{}{"apiVersion": "v1"})
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Equalf("failed to delete resource, missing argument kind", toolResult.Content[0].(mcp.TextContent).Text,
+ "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ s.Run("resources_delete with invalid apiVersion returns error", func() {
+ toolResult, _ := s.CallTool("resources_delete", map[string]interface{}{"apiVersion": "invalid/api/version", "kind": "Pod", "name": "a-pod"})
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Equalf("failed to delete resource, invalid argument apiVersion", toolResult.Content[0].(mcp.TextContent).Text,
+ "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ s.Run("resources_delete with nonexistent apiVersion returns error", func() {
+ toolResult, _ := s.CallTool("resources_delete", map[string]interface{}{"apiVersion": "custom.non.existent.example.com/v1", "kind": "Custom", "name": "a-custom"})
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Equalf(`failed to delete resource: no matches for kind "Custom" in version "custom.non.existent.example.com/v1"`,
+ toolResult.Content[0].(mcp.TextContent).Text, "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ s.Run("resources_delete with missing name returns error", func() {
+ toolResult, _ := s.CallTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace"})
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Equalf("failed to delete resource, missing argument name", toolResult.Content[0].(mcp.TextContent).Text,
+ "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ s.Run("resources_delete with nonexistent resource returns error", func() {
+ toolResult, _ := s.CallTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "ConfigMap", "name": "nonexistent-configmap"})
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Equalf(`failed to delete resource: configmaps "nonexistent-configmap" not found`,
+ toolResult.Content[0].(mcp.TextContent).Text, "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text)
+ })
+
+ s.Run("resources_delete with valid namespaced resource", func() {
+ resourcesDeleteCm, err := s.CallTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "ConfigMap", "name": "a-configmap-to-delete"})
+ s.Run("returns success", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(resourcesDeleteCm.IsError, "call tool failed")
+ s.Equalf("Resource deleted successfully", resourcesDeleteCm.Content[0].(mcp.TextContent).Text,
+ "invalid tool result content got: %v", resourcesDeleteCm.Content[0].(mcp.TextContent).Text)
+ })
+ s.Run("deletes ConfigMap", func() {
+ _, err := client.CoreV1().ConfigMaps("default").Get(s.T().Context(), "a-configmap-to-delete", metav1.GetOptions{})
+ s.Error(err, "ConfigMap not deleted")
+ })
+ })
+
+ s.Run("resources_delete with valid cluster scoped resource", func() {
+ resourcesDeleteNamespace, err := s.CallTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace", "name": "ns-to-delete"})
+ s.Run("returns success", func() {
+ s.Nilf(err, "call tool failed %v", err)
+ s.Falsef(resourcesDeleteNamespace.IsError, "call tool failed")
+ s.Equalf("Resource deleted successfully", resourcesDeleteNamespace.Content[0].(mcp.TextContent).Text,
+ "invalid tool result content got: %v", resourcesDeleteNamespace.Content[0].(mcp.TextContent).Text)
+ })
+ s.Run(" deletes Namespace", func() {
+ ns, err := client.CoreV1().Namespaces().Get(s.T().Context(), "ns-to-delete", metav1.GetOptions{})
+ s.Truef(err != nil || (ns != nil && ns.DeletionTimestamp != nil), "Namespace not deleted")
})
})
}
-func TestResourcesDeleteDenied(t *testing.T) {
- deniedResourcesServer := test.Must(config.ReadToml([]byte(`
+func (s *ResourcesSuite) TestResourcesDeleteDenied() {
+ s.Require().NoError(toml.Unmarshal([]byte(`
denied_resources = [
{ version = "v1", kind = "Secret" },
{ group = "rbac.authorization.k8s.io", version = "v1" }
]
- `)))
- testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) {
- c.withEnvTest()
- kc := c.newKubernetesClient()
- _, _ = kc.CoreV1().ConfigMaps("default").Create(c.ctx, &corev1.ConfigMap{
- ObjectMeta: metav1.ObjectMeta{Name: "allowed-configmap-to-delete"},
- }, metav1.CreateOptions{})
- deniedByKind, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "Secret", "namespace": "default", "name": "denied-secret"})
- t.Run("resources_delete (denied by kind) has error", func(t *testing.T) {
- if !deniedByKind.IsError {
- t.Fatalf("call tool should fail")
- }
- })
- t.Run("resources_delete (denied by kind) describes denial", func(t *testing.T) {
+ `), s.Cfg), "Expected to parse denied resources config")
+ s.InitMcpClient()
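+ // Pre-create a ConfigMap that is not denied so the allowed delete path has a real target.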
+ kc := kubernetes.NewForConfigOrDie(envTestRestConfig)
+ _, _ = kc.CoreV1().ConfigMaps("default").Create(s.T().Context(), &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{Name: "allowed-configmap-to-delete"},
+ }, metav1.CreateOptions{})
+ s.Run("resources_delete (denied by kind)", func() {
+ deniedByKind, err := s.CallTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "Secret", "namespace": "default", "name": "denied-secret"})
+ s.Run("has error", func() {
+ s.Truef(deniedByKind.IsError, "call tool should fail")
+ s.Nilf(err, "call tool should not return error object")
+ })
+ s.Run("describes denial", func() {
expectedMessage := "failed to delete resource: resource not allowed: /v1, Kind=Secret"
- if deniedByKind.Content[0].(mcp.TextContent).Text != expectedMessage {
- t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text)
- }
+ s.Equalf(expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text,
+ "expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text)
})
- deniedByGroup, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "rbac.authorization.k8s.io/v1", "kind": "Role", "namespace": "default", "name": "denied-role"})
- t.Run("resources_delete (denied by group) has error", func(t *testing.T) {
- if !deniedByGroup.IsError {
- t.Fatalf("call tool should fail")
- }
+ })
+ s.Run("resources_delete (denied by group)", func() {
+ deniedByGroup, err := s.CallTool("resources_delete", map[string]interface{}{"apiVersion": "rbac.authorization.k8s.io/v1", "kind": "Role", "namespace": "default", "name": "denied-role"})
+ s.Run("has error", func() {
+ s.Truef(deniedByGroup.IsError, "call tool should fail")
+ s.Nilf(err, "call tool should not return error object")
})
- t.Run("resources_delete (denied by group) describes denial", func(t *testing.T) {
+ s.Run("describes denial", func() {
expectedMessage := "failed to delete resource: resource not allowed: rbac.authorization.k8s.io/v1, Kind=Role"
- if deniedByGroup.Content[0].(mcp.TextContent).Text != expectedMessage {
- t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text)
- }
- })
- allowedResource, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "ConfigMap", "name": "allowed-configmap-to-delete"})
- t.Run("resources_delete (not denied) deletes resource", func(t *testing.T) {
- if allowedResource.IsError {
- t.Fatalf("call tool should not fail")
- }
+ s.Equalf(expectedMessage, deniedByGroup.Content[0].(mcp.TextContent).Text,
+ "expected descriptive error '%s', got %v", expectedMessage, deniedByGroup.Content[0].(mcp.TextContent).Text)
})
})
+ s.Run("resources_delete (not denied) deletes resource", func() {
+ allowedResource, err := s.CallTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "ConfigMap", "name": "allowed-configmap-to-delete"})
+ s.Falsef(allowedResource.IsError, "call tool should not fail")
+ s.Nilf(err, "call tool should not return error object")
+ })
+}
+
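+// TestResources runs ResourcesSuite through the standard go test runner.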
+func TestResources(t *testing.T) {
+ suite.Run(t, new(ResourcesSuite))
}
diff --git a/pkg/mcp/testdata/toolsets-config-tools.json b/pkg/mcp/testdata/toolsets-config-tools.json
index c1767491..2c5b7ae8 100644
--- a/pkg/mcp/testdata/toolsets-config-tools.json
+++ b/pkg/mcp/testdata/toolsets-config-tools.json
@@ -4,7 +4,6 @@
"title": "Configuration: View",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Get the current Kubernetes configuration content as a kubeconfig YAML",
diff --git a/pkg/mcp/testdata/toolsets-core-tools.json b/pkg/mcp/testdata/toolsets-core-tools.json
index 43680dae..b4c5667f 100644
--- a/pkg/mcp/testdata/toolsets-core-tools.json
+++ b/pkg/mcp/testdata/toolsets-core-tools.json
@@ -4,7 +4,6 @@
"title": "Events: List",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Kubernetes events in the current cluster from all namespaces",
@@ -24,7 +23,6 @@
"title": "Namespaces: List",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Kubernetes namespaces in the current cluster",
@@ -33,10 +31,89 @@
},
"name": "namespaces_list"
},
+ {
+ "annotations": {
+ "title": "Node: Log",
+ "readOnlyHint": true,
+ "destructiveHint": false,
+ "openWorldHint": true
+ },
+ "description": "Get logs from a Kubernetes node (kubelet, kube-proxy, or other system logs). This accesses node logs through the Kubernetes API proxy to the kubelet",
+ "inputSchema": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "description": "Name of the node to get logs from",
+ "type": "string"
+ },
+ "query": {
+ "description": "query specifies services(s) or files from which to return logs (required). Example: \"kubelet\" to fetch kubelet logs, \"/\u003clog-file-name\u003e\" to fetch a specific log file from the node (e.g., \"/var/log/kubelet.log\" or \"/var/log/kube-proxy.log\")",
+ "type": "string"
+ },
+ "tailLines": {
+ "default": 100,
+ "description": "Number of lines to retrieve from the end of the logs (Optional, 0 means all logs)",
+ "minimum": 0,
+ "type": "integer"
+ }
+ },
+ "required": [
+ "name",
+ "query"
+ ]
+ },
+ "name": "nodes_log"
+ },
+ {
+ "annotations": {
+ "title": "Node: Stats Summary",
+ "readOnlyHint": true,
+ "destructiveHint": false,
+ "openWorldHint": true
+ },
+ "description": "Get detailed resource usage statistics from a Kubernetes node via the kubelet's Summary API. Provides comprehensive metrics including CPU, memory, filesystem, and network usage at the node, pod, and container levels. On systems with cgroup v2 and kernel 4.20+, also includes PSI (Pressure Stall Information) metrics that show resource pressure for CPU, memory, and I/O. See https://kubernetes.io/docs/reference/instrumentation/understand-psi-metrics/ for details on PSI metrics",
+ "inputSchema": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "description": "Name of the node to get stats from",
+ "type": "string"
+ }
+ },
+ "required": [
+ "name"
+ ]
+ },
+ "name": "nodes_stats_summary"
+ },
+ {
+ "annotations": {
+ "title": "Nodes: Top",
+ "readOnlyHint": true,
+ "destructiveHint": false,
+ "idempotentHint": true,
+ "openWorldHint": true
+ },
+ "description": "List the resource consumption (CPU and memory) as recorded by the Kubernetes Metrics Server for the specified Kubernetes Nodes or all nodes in the cluster",
+ "inputSchema": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "description": "Name of the Node to get the resource consumption from (Optional, all Nodes if not provided)",
+ "type": "string"
+ },
+ "label_selector": {
+ "description": "Kubernetes label selector (e.g. 'node-role.kubernetes.io/worker=') to filter nodes by label (Optional, only applicable when name is not provided)",
+ "pattern": "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]",
+ "type": "string"
+ }
+ }
+ },
+ "name": "nodes_top"
+ },
{
"annotations": {
"title": "Pods: Delete",
- "readOnlyHint": false,
"destructiveHint": true,
"idempotentHint": true,
"openWorldHint": true
@@ -63,9 +140,7 @@
{
"annotations": {
"title": "Pods: Exec",
- "readOnlyHint": false,
"destructiveHint": true,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Execute a command in a Kubernetes Pod in the current or provided namespace with the provided name and command",
@@ -104,7 +179,6 @@
"title": "Pods: Get",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Get a Kubernetes Pod in the current or provided namespace with the provided name",
@@ -131,7 +205,6 @@
"title": "Pods: List",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Kubernetes pods in the current cluster from all namespaces",
@@ -152,7 +225,6 @@
"title": "Pods: List in Namespace",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Kubernetes pods in the specified namespace in the current cluster",
@@ -180,7 +252,6 @@
"title": "Pods: Log",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Get the logs of a Kubernetes Pod in the current or provided namespace with the provided name",
@@ -219,9 +290,7 @@
{
"annotations": {
"title": "Pods: Run",
- "readOnlyHint": false,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Run a Kubernetes Pod in the current or provided namespace with the provided container image and optional name",
@@ -288,7 +357,6 @@
{
"annotations": {
"title": "Resources: Create or Update",
- "readOnlyHint": false,
"destructiveHint": true,
"idempotentHint": true,
"openWorldHint": true
@@ -311,7 +379,6 @@
{
"annotations": {
"title": "Resources: Delete",
- "readOnlyHint": false,
"destructiveHint": true,
"idempotentHint": true,
"openWorldHint": true
@@ -350,7 +417,6 @@
"title": "Resources: Get",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Get a Kubernetes resource in the current cluster by providing its apiVersion, kind, optionally the namespace, and its name\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)",
@@ -387,7 +453,6 @@
"title": "Resources: List",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List Kubernetes resources and objects in the current cluster by providing their apiVersion and kind and optionally the namespace and label selector\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)",
diff --git a/pkg/mcp/testdata/toolsets-full-tools-multicluster-enum.json b/pkg/mcp/testdata/toolsets-full-tools-multicluster-enum.json
index 97af6fb5..7831c054 100644
--- a/pkg/mcp/testdata/toolsets-full-tools-multicluster-enum.json
+++ b/pkg/mcp/testdata/toolsets-full-tools-multicluster-enum.json
@@ -18,7 +18,6 @@
"title": "Configuration: View",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Get the current Kubernetes configuration content as a kubeconfig YAML",
@@ -38,7 +37,6 @@
"title": "Events: List",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Kubernetes events in the current cluster from all namespaces",
@@ -64,9 +62,7 @@
{
"annotations": {
"title": "Helm: Install",
- "readOnlyHint": false,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Install a Helm chart in the current or provided namespace",
@@ -109,7 +105,6 @@
"title": "Helm: List",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Helm releases in the current or provided namespace (or in all namespaces if specified)",
@@ -139,7 +134,6 @@
{
"annotations": {
"title": "Helm: Uninstall",
- "readOnlyHint": false,
"destructiveHint": true,
"idempotentHint": true,
"openWorldHint": true
@@ -176,7 +170,6 @@
"title": "Namespaces: List",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Kubernetes namespaces in the current cluster",
@@ -195,10 +188,113 @@
},
"name": "namespaces_list"
},
+ {
+ "annotations": {
+ "title": "Node: Log",
+ "readOnlyHint": true,
+ "destructiveHint": false,
+ "openWorldHint": true
+ },
+ "description": "Get logs from a Kubernetes node (kubelet, kube-proxy, or other system logs). This accesses node logs through the Kubernetes API proxy to the kubelet",
+ "inputSchema": {
+ "type": "object",
+ "properties": {
+ "context": {
+ "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
+ "enum": [
+ "extra-cluster",
+ "fake-context"
+ ],
+ "type": "string"
+ },
+ "name": {
+ "description": "Name of the node to get logs from",
+ "type": "string"
+ },
+ "query": {
+ "description": "query specifies services(s) or files from which to return logs (required). Example: \"kubelet\" to fetch kubelet logs, \"/\u003clog-file-name\u003e\" to fetch a specific log file from the node (e.g., \"/var/log/kubelet.log\" or \"/var/log/kube-proxy.log\")",
+ "type": "string"
+ },
+ "tailLines": {
+ "default": 100,
+ "description": "Number of lines to retrieve from the end of the logs (Optional, 0 means all logs)",
+ "minimum": 0,
+ "type": "integer"
+ }
+ },
+ "required": [
+ "name",
+ "query"
+ ]
+ },
+ "name": "nodes_log"
+ },
+ {
+ "annotations": {
+ "title": "Node: Stats Summary",
+ "readOnlyHint": true,
+ "destructiveHint": false,
+ "openWorldHint": true
+ },
+ "description": "Get detailed resource usage statistics from a Kubernetes node via the kubelet's Summary API. Provides comprehensive metrics including CPU, memory, filesystem, and network usage at the node, pod, and container levels. On systems with cgroup v2 and kernel 4.20+, also includes PSI (Pressure Stall Information) metrics that show resource pressure for CPU, memory, and I/O. See https://kubernetes.io/docs/reference/instrumentation/understand-psi-metrics/ for details on PSI metrics",
+ "inputSchema": {
+ "type": "object",
+ "properties": {
+ "context": {
+ "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
+ "enum": [
+ "extra-cluster",
+ "fake-context"
+ ],
+ "type": "string"
+ },
+ "name": {
+ "description": "Name of the node to get stats from",
+ "type": "string"
+ }
+ },
+ "required": [
+ "name"
+ ]
+ },
+ "name": "nodes_stats_summary"
+ },
+ {
+ "annotations": {
+ "title": "Nodes: Top",
+ "readOnlyHint": true,
+ "destructiveHint": false,
+ "idempotentHint": true,
+ "openWorldHint": true
+ },
+ "description": "List the resource consumption (CPU and memory) as recorded by the Kubernetes Metrics Server for the specified Kubernetes Nodes or all nodes in the cluster",
+ "inputSchema": {
+ "type": "object",
+ "properties": {
+ "context": {
+ "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
+ "enum": [
+ "extra-cluster",
+ "fake-context"
+ ],
+ "type": "string"
+ },
+ "name": {
+ "description": "Name of the Node to get the resource consumption from (Optional, all Nodes if not provided)",
+ "type": "string"
+ },
+ "label_selector": {
+ "description": "Kubernetes label selector (e.g. 'node-role.kubernetes.io/worker=') to filter nodes by label (Optional, only applicable when name is not provided)",
+ "pattern": "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]",
+ "type": "string"
+ }
+ }
+ },
+ "name": "nodes_top"
+ },
{
"annotations": {
"title": "Pods: Delete",
- "readOnlyHint": false,
"destructiveHint": true,
"idempotentHint": true,
"openWorldHint": true
@@ -233,9 +329,7 @@
{
"annotations": {
"title": "Pods: Exec",
- "readOnlyHint": false,
"destructiveHint": true,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Execute a command in a Kubernetes Pod in the current or provided namespace with the provided name and command",
@@ -282,7 +376,6 @@
"title": "Pods: Get",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Get a Kubernetes Pod in the current or provided namespace with the provided name",
@@ -317,7 +410,6 @@
"title": "Pods: List",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Kubernetes pods in the current cluster from all namespaces",
@@ -346,7 +438,6 @@
"title": "Pods: List in Namespace",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Kubernetes pods in the specified namespace in the current cluster",
@@ -382,7 +473,6 @@
"title": "Pods: Log",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Get the logs of a Kubernetes Pod in the current or provided namespace with the provided name",
@@ -429,9 +519,7 @@
{
"annotations": {
"title": "Pods: Run",
- "readOnlyHint": false,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Run a Kubernetes Pod in the current or provided namespace with the provided container image and optional name",
@@ -514,7 +602,6 @@
{
"annotations": {
"title": "Resources: Create or Update",
- "readOnlyHint": false,
"destructiveHint": true,
"idempotentHint": true,
"openWorldHint": true
@@ -545,7 +632,6 @@
{
"annotations": {
"title": "Resources: Delete",
- "readOnlyHint": false,
"destructiveHint": true,
"idempotentHint": true,
"openWorldHint": true
@@ -592,7 +678,6 @@
"title": "Resources: Get",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Get a Kubernetes resource in the current cluster by providing its apiVersion, kind, optionally the namespace, and its name\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)",
@@ -637,7 +722,6 @@
"title": "Resources: List",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List Kubernetes resources and objects in the current cluster by providing their apiVersion and kind and optionally the namespace and label selector\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)",
diff --git a/pkg/mcp/testdata/toolsets-full-tools-multicluster.json b/pkg/mcp/testdata/toolsets-full-tools-multicluster.json
index 861a1b5a..b95f179c 100644
--- a/pkg/mcp/testdata/toolsets-full-tools-multicluster.json
+++ b/pkg/mcp/testdata/toolsets-full-tools-multicluster.json
@@ -18,7 +18,6 @@
"title": "Configuration: View",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Get the current Kubernetes configuration content as a kubeconfig YAML",
@@ -38,7 +37,6 @@
"title": "Events: List",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Kubernetes events in the current cluster from all namespaces",
@@ -60,9 +58,7 @@
{
"annotations": {
"title": "Helm: Install",
- "readOnlyHint": false,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Install a Helm chart in the current or provided namespace",
@@ -101,7 +97,6 @@
"title": "Helm: List",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Helm releases in the current or provided namespace (or in all namespaces if specified)",
@@ -127,7 +122,6 @@
{
"annotations": {
"title": "Helm: Uninstall",
- "readOnlyHint": false,
"destructiveHint": true,
"idempotentHint": true,
"openWorldHint": true
@@ -160,7 +154,6 @@
"title": "Namespaces: List",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Kubernetes namespaces in the current cluster",
@@ -175,10 +168,101 @@
},
"name": "namespaces_list"
},
+ {
+ "annotations": {
+ "title": "Node: Log",
+ "readOnlyHint": true,
+ "destructiveHint": false,
+ "openWorldHint": true
+ },
+ "description": "Get logs from a Kubernetes node (kubelet, kube-proxy, or other system logs). This accesses node logs through the Kubernetes API proxy to the kubelet",
+ "inputSchema": {
+ "type": "object",
+ "properties": {
+ "context": {
+ "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
+ "type": "string"
+ },
+ "name": {
+ "description": "Name of the node to get logs from",
+ "type": "string"
+ },
+ "query": {
+ "description": "query specifies services(s) or files from which to return logs (required). Example: \"kubelet\" to fetch kubelet logs, \"/\u003clog-file-name\u003e\" to fetch a specific log file from the node (e.g., \"/var/log/kubelet.log\" or \"/var/log/kube-proxy.log\")",
+ "type": "string"
+ },
+ "tailLines": {
+ "default": 100,
+ "description": "Number of lines to retrieve from the end of the logs (Optional, 0 means all logs)",
+ "minimum": 0,
+ "type": "integer"
+ }
+ },
+ "required": [
+ "name",
+ "query"
+ ]
+ },
+ "name": "nodes_log"
+ },
+ {
+ "annotations": {
+ "title": "Node: Stats Summary",
+ "readOnlyHint": true,
+ "destructiveHint": false,
+ "openWorldHint": true
+ },
+ "description": "Get detailed resource usage statistics from a Kubernetes node via the kubelet's Summary API. Provides comprehensive metrics including CPU, memory, filesystem, and network usage at the node, pod, and container levels. On systems with cgroup v2 and kernel 4.20+, also includes PSI (Pressure Stall Information) metrics that show resource pressure for CPU, memory, and I/O. See https://kubernetes.io/docs/reference/instrumentation/understand-psi-metrics/ for details on PSI metrics",
+ "inputSchema": {
+ "type": "object",
+ "properties": {
+ "context": {
+ "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
+ "type": "string"
+ },
+ "name": {
+ "description": "Name of the node to get stats from",
+ "type": "string"
+ }
+ },
+ "required": [
+ "name"
+ ]
+ },
+ "name": "nodes_stats_summary"
+ },
+ {
+ "annotations": {
+ "title": "Nodes: Top",
+ "readOnlyHint": true,
+ "destructiveHint": false,
+ "idempotentHint": true,
+ "openWorldHint": true
+ },
+ "description": "List the resource consumption (CPU and memory) as recorded by the Kubernetes Metrics Server for the specified Kubernetes Nodes or all nodes in the cluster",
+ "inputSchema": {
+ "type": "object",
+ "properties": {
+ "context": {
+ "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
+ "type": "string"
+ },
+ "name": {
+ "description": "Name of the Node to get the resource consumption from (Optional, all Nodes if not provided)",
+ "type": "string"
+ },
+ "label_selector": {
+ "description": "Kubernetes label selector (e.g. 'node-role.kubernetes.io/worker=') to filter nodes by label (Optional, only applicable when name is not provided)",
+ "pattern": "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]",
+ "type": "string"
+ }
+ }
+ },
+ "name": "nodes_top"
+ },
{
"annotations": {
"title": "Pods: Delete",
- "readOnlyHint": false,
"destructiveHint": true,
"idempotentHint": true,
"openWorldHint": true
@@ -209,9 +293,7 @@
{
"annotations": {
"title": "Pods: Exec",
- "readOnlyHint": false,
"destructiveHint": true,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Execute a command in a Kubernetes Pod in the current or provided namespace with the provided name and command",
@@ -254,7 +336,6 @@
"title": "Pods: Get",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Get a Kubernetes Pod in the current or provided namespace with the provided name",
@@ -285,7 +366,6 @@
"title": "Pods: List",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Kubernetes pods in the current cluster from all namespaces",
@@ -310,7 +390,6 @@
"title": "Pods: List in Namespace",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Kubernetes pods in the specified namespace in the current cluster",
@@ -342,7 +421,6 @@
"title": "Pods: Log",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Get the logs of a Kubernetes Pod in the current or provided namespace with the provided name",
@@ -385,9 +463,7 @@
{
"annotations": {
"title": "Pods: Run",
- "readOnlyHint": false,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Run a Kubernetes Pod in the current or provided namespace with the provided container image and optional name",
@@ -462,7 +538,6 @@
{
"annotations": {
"title": "Resources: Create or Update",
- "readOnlyHint": false,
"destructiveHint": true,
"idempotentHint": true,
"openWorldHint": true
@@ -489,7 +564,6 @@
{
"annotations": {
"title": "Resources: Delete",
- "readOnlyHint": false,
"destructiveHint": true,
"idempotentHint": true,
"openWorldHint": true
@@ -532,7 +606,6 @@
"title": "Resources: Get",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Get a Kubernetes resource in the current cluster by providing its apiVersion, kind, optionally the namespace, and its name\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)",
@@ -573,7 +646,6 @@
"title": "Resources: List",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List Kubernetes resources and objects in the current cluster by providing their apiVersion and kind and optionally the namespace and label selector\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)",
diff --git a/pkg/mcp/testdata/toolsets-full-tools-openshift.json b/pkg/mcp/testdata/toolsets-full-tools-openshift.json
index b5018945..e4488b0a 100644
--- a/pkg/mcp/testdata/toolsets-full-tools-openshift.json
+++ b/pkg/mcp/testdata/toolsets-full-tools-openshift.json
@@ -4,7 +4,6 @@
"title": "Configuration: View",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Get the current Kubernetes configuration content as a kubeconfig YAML",
@@ -24,7 +23,6 @@
"title": "Events: List",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Kubernetes events in the current cluster from all namespaces",
@@ -42,9 +40,7 @@
{
"annotations": {
"title": "Helm: Install",
- "readOnlyHint": false,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Install a Helm chart in the current or provided namespace",
@@ -79,7 +75,6 @@
"title": "Helm: List",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Helm releases in the current or provided namespace (or in all namespaces if specified)",
@@ -101,7 +96,6 @@
{
"annotations": {
"title": "Helm: Uninstall",
- "readOnlyHint": false,
"destructiveHint": true,
"idempotentHint": true,
"openWorldHint": true
@@ -130,7 +124,6 @@
"title": "Namespaces: List",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Kubernetes namespaces in the current cluster",
@@ -139,10 +132,89 @@
},
"name": "namespaces_list"
},
+ {
+ "annotations": {
+ "title": "Node: Log",
+ "readOnlyHint": true,
+ "destructiveHint": false,
+ "openWorldHint": true
+ },
+ "description": "Get logs from a Kubernetes node (kubelet, kube-proxy, or other system logs). This accesses node logs through the Kubernetes API proxy to the kubelet",
+ "inputSchema": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "description": "Name of the node to get logs from",
+ "type": "string"
+ },
+ "query": {
+ "description": "query specifies services(s) or files from which to return logs (required). Example: \"kubelet\" to fetch kubelet logs, \"/\u003clog-file-name\u003e\" to fetch a specific log file from the node (e.g., \"/var/log/kubelet.log\" or \"/var/log/kube-proxy.log\")",
+ "type": "string"
+ },
+ "tailLines": {
+ "default": 100,
+ "description": "Number of lines to retrieve from the end of the logs (Optional, 0 means all logs)",
+ "minimum": 0,
+ "type": "integer"
+ }
+ },
+ "required": [
+ "name",
+ "query"
+ ]
+ },
+ "name": "nodes_log"
+ },
+ {
+ "annotations": {
+ "title": "Node: Stats Summary",
+ "readOnlyHint": true,
+ "destructiveHint": false,
+ "openWorldHint": true
+ },
+ "description": "Get detailed resource usage statistics from a Kubernetes node via the kubelet's Summary API. Provides comprehensive metrics including CPU, memory, filesystem, and network usage at the node, pod, and container levels. On systems with cgroup v2 and kernel 4.20+, also includes PSI (Pressure Stall Information) metrics that show resource pressure for CPU, memory, and I/O. See https://kubernetes.io/docs/reference/instrumentation/understand-psi-metrics/ for details on PSI metrics",
+ "inputSchema": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "description": "Name of the node to get stats from",
+ "type": "string"
+ }
+ },
+ "required": [
+ "name"
+ ]
+ },
+ "name": "nodes_stats_summary"
+ },
+ {
+ "annotations": {
+ "title": "Nodes: Top",
+ "readOnlyHint": true,
+ "destructiveHint": false,
+ "idempotentHint": true,
+ "openWorldHint": true
+ },
+ "description": "List the resource consumption (CPU and memory) as recorded by the Kubernetes Metrics Server for the specified Kubernetes Nodes or all nodes in the cluster",
+ "inputSchema": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "description": "Name of the Node to get the resource consumption from (Optional, all Nodes if not provided)",
+ "type": "string"
+ },
+ "label_selector": {
+ "description": "Kubernetes label selector (e.g. 'node-role.kubernetes.io/worker=') to filter nodes by label (Optional, only applicable when name is not provided)",
+ "pattern": "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]",
+ "type": "string"
+ }
+ }
+ },
+ "name": "nodes_top"
+ },
{
"annotations": {
"title": "Pods: Delete",
- "readOnlyHint": false,
"destructiveHint": true,
"idempotentHint": true,
"openWorldHint": true
@@ -169,9 +241,7 @@
{
"annotations": {
"title": "Pods: Exec",
- "readOnlyHint": false,
"destructiveHint": true,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Execute a command in a Kubernetes Pod in the current or provided namespace with the provided name and command",
@@ -210,7 +280,6 @@
"title": "Pods: Get",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Get a Kubernetes Pod in the current or provided namespace with the provided name",
@@ -237,7 +306,6 @@
"title": "Pods: List",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Kubernetes pods in the current cluster from all namespaces",
@@ -258,7 +326,6 @@
"title": "Pods: List in Namespace",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Kubernetes pods in the specified namespace in the current cluster",
@@ -286,7 +353,6 @@
"title": "Pods: Log",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Get the logs of a Kubernetes Pod in the current or provided namespace with the provided name",
@@ -325,9 +391,7 @@
{
"annotations": {
"title": "Pods: Run",
- "readOnlyHint": false,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Run a Kubernetes Pod in the current or provided namespace with the provided container image and optional name",
@@ -396,7 +460,6 @@
"title": "Projects: List",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List all the OpenShift projects in the current cluster",
@@ -408,7 +471,6 @@
{
"annotations": {
"title": "Resources: Create or Update",
- "readOnlyHint": false,
"destructiveHint": true,
"idempotentHint": true,
"openWorldHint": true
@@ -431,7 +493,6 @@
{
"annotations": {
"title": "Resources: Delete",
- "readOnlyHint": false,
"destructiveHint": true,
"idempotentHint": true,
"openWorldHint": true
@@ -470,7 +531,6 @@
"title": "Resources: Get",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Get a Kubernetes resource in the current cluster by providing its apiVersion, kind, optionally the namespace, and its name\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress, route.openshift.io/v1 Route)",
@@ -507,7 +567,6 @@
"title": "Resources: List",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List Kubernetes resources and objects in the current cluster by providing their apiVersion and kind and optionally the namespace and label selector\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress, route.openshift.io/v1 Route)",
diff --git a/pkg/mcp/testdata/toolsets-full-tools.json b/pkg/mcp/testdata/toolsets-full-tools.json
index 7b9f471d..ca270027 100644
--- a/pkg/mcp/testdata/toolsets-full-tools.json
+++ b/pkg/mcp/testdata/toolsets-full-tools.json
@@ -4,7 +4,6 @@
"title": "Configuration: View",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Get the current Kubernetes configuration content as a kubeconfig YAML",
@@ -24,7 +23,6 @@
"title": "Events: List",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Kubernetes events in the current cluster from all namespaces",
@@ -42,9 +40,7 @@
{
"annotations": {
"title": "Helm: Install",
- "readOnlyHint": false,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Install a Helm chart in the current or provided namespace",
@@ -79,7 +75,6 @@
"title": "Helm: List",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Helm releases in the current or provided namespace (or in all namespaces if specified)",
@@ -101,7 +96,6 @@
{
"annotations": {
"title": "Helm: Uninstall",
- "readOnlyHint": false,
"destructiveHint": true,
"idempotentHint": true,
"openWorldHint": true
@@ -130,7 +124,6 @@
"title": "Namespaces: List",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Kubernetes namespaces in the current cluster",
@@ -139,10 +132,89 @@
},
"name": "namespaces_list"
},
+ {
+ "annotations": {
+ "title": "Node: Log",
+ "readOnlyHint": true,
+ "destructiveHint": false,
+ "openWorldHint": true
+ },
+ "description": "Get logs from a Kubernetes node (kubelet, kube-proxy, or other system logs). This accesses node logs through the Kubernetes API proxy to the kubelet",
+ "inputSchema": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "description": "Name of the node to get logs from",
+ "type": "string"
+ },
+ "query": {
+ "description": "query specifies services(s) or files from which to return logs (required). Example: \"kubelet\" to fetch kubelet logs, \"/\u003clog-file-name\u003e\" to fetch a specific log file from the node (e.g., \"/var/log/kubelet.log\" or \"/var/log/kube-proxy.log\")",
+ "type": "string"
+ },
+ "tailLines": {
+ "default": 100,
+ "description": "Number of lines to retrieve from the end of the logs (Optional, 0 means all logs)",
+ "minimum": 0,
+ "type": "integer"
+ }
+ },
+ "required": [
+ "name",
+ "query"
+ ]
+ },
+ "name": "nodes_log"
+ },
+ {
+ "annotations": {
+ "title": "Node: Stats Summary",
+ "readOnlyHint": true,
+ "destructiveHint": false,
+ "openWorldHint": true
+ },
+ "description": "Get detailed resource usage statistics from a Kubernetes node via the kubelet's Summary API. Provides comprehensive metrics including CPU, memory, filesystem, and network usage at the node, pod, and container levels. On systems with cgroup v2 and kernel 4.20+, also includes PSI (Pressure Stall Information) metrics that show resource pressure for CPU, memory, and I/O. See https://kubernetes.io/docs/reference/instrumentation/understand-psi-metrics/ for details on PSI metrics",
+ "inputSchema": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "description": "Name of the node to get stats from",
+ "type": "string"
+ }
+ },
+ "required": [
+ "name"
+ ]
+ },
+ "name": "nodes_stats_summary"
+ },
+ {
+ "annotations": {
+ "title": "Nodes: Top",
+ "readOnlyHint": true,
+ "destructiveHint": false,
+ "idempotentHint": true,
+ "openWorldHint": true
+ },
+ "description": "List the resource consumption (CPU and memory) as recorded by the Kubernetes Metrics Server for the specified Kubernetes Nodes or all nodes in the cluster",
+ "inputSchema": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "description": "Name of the Node to get the resource consumption from (Optional, all Nodes if not provided)",
+ "type": "string"
+ },
+ "label_selector": {
+ "description": "Kubernetes label selector (e.g. 'node-role.kubernetes.io/worker=') to filter nodes by label (Optional, only applicable when name is not provided)",
+ "pattern": "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]",
+ "type": "string"
+ }
+ }
+ },
+ "name": "nodes_top"
+ },
{
"annotations": {
"title": "Pods: Delete",
- "readOnlyHint": false,
"destructiveHint": true,
"idempotentHint": true,
"openWorldHint": true
@@ -169,9 +241,7 @@
{
"annotations": {
"title": "Pods: Exec",
- "readOnlyHint": false,
"destructiveHint": true,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Execute a command in a Kubernetes Pod in the current or provided namespace with the provided name and command",
@@ -210,7 +280,6 @@
"title": "Pods: Get",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Get a Kubernetes Pod in the current or provided namespace with the provided name",
@@ -237,7 +306,6 @@
"title": "Pods: List",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Kubernetes pods in the current cluster from all namespaces",
@@ -258,7 +326,6 @@
"title": "Pods: List in Namespace",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Kubernetes pods in the specified namespace in the current cluster",
@@ -286,7 +353,6 @@
"title": "Pods: Log",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Get the logs of a Kubernetes Pod in the current or provided namespace with the provided name",
@@ -325,9 +391,7 @@
{
"annotations": {
"title": "Pods: Run",
- "readOnlyHint": false,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Run a Kubernetes Pod in the current or provided namespace with the provided container image and optional name",
@@ -394,7 +458,6 @@
{
"annotations": {
"title": "Resources: Create or Update",
- "readOnlyHint": false,
"destructiveHint": true,
"idempotentHint": true,
"openWorldHint": true
@@ -417,7 +480,6 @@
{
"annotations": {
"title": "Resources: Delete",
- "readOnlyHint": false,
"destructiveHint": true,
"idempotentHint": true,
"openWorldHint": true
@@ -456,7 +518,6 @@
"title": "Resources: Get",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Get a Kubernetes resource in the current cluster by providing its apiVersion, kind, optionally the namespace, and its name\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)",
@@ -493,7 +554,6 @@
"title": "Resources: List",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List Kubernetes resources and objects in the current cluster by providing their apiVersion and kind and optionally the namespace and label selector\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)",
diff --git a/pkg/mcp/testdata/toolsets-helm-tools.json b/pkg/mcp/testdata/toolsets-helm-tools.json
index c57dfc27..6afd3f33 100644
--- a/pkg/mcp/testdata/toolsets-helm-tools.json
+++ b/pkg/mcp/testdata/toolsets-helm-tools.json
@@ -2,9 +2,7 @@
{
"annotations": {
"title": "Helm: Install",
- "readOnlyHint": false,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "Install a Helm chart in the current or provided namespace",
@@ -39,7 +37,6 @@
"title": "Helm: List",
"readOnlyHint": true,
"destructiveHint": false,
- "idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Helm releases in the current or provided namespace (or in all namespaces if specified)",
@@ -61,7 +58,6 @@
{
"annotations": {
"title": "Helm: Uninstall",
- "readOnlyHint": false,
"destructiveHint": true,
"idempotentHint": true,
"openWorldHint": true
diff --git a/pkg/mcp/tool_filter.go b/pkg/mcp/tool_filter.go
index c097132c..28678d96 100644
--- a/pkg/mcp/tool_filter.go
+++ b/pkg/mcp/tool_filter.go
@@ -32,7 +32,7 @@ func ShouldIncludeTargetListTool(targetName string, targets []string) ToolFilter
// TODO: this check should be removed or made more generic when we have other
if tool.Tool.Name == "configuration_contexts_list" && targetName != kubernetes.KubeConfigTargetParameterName {
- // let's not include configuration_contexts_list if we aren't targeting contexts in our ManagerProvider
+ // let's not include configuration_contexts_list if we aren't targeting contexts in our Provider
return false
}
diff --git a/pkg/mcp/toolsets_test.go b/pkg/mcp/toolsets_test.go
index 527b1e22..d81392a5 100644
--- a/pkg/mcp/toolsets_test.go
+++ b/pkg/mcp/toolsets_test.go
@@ -65,6 +65,9 @@ func (s *ToolsetsSuite) TestNoToolsets() {
}
func (s *ToolsetsSuite) TestDefaultToolsetsTools() {
+ if configuration.HasDefaultOverrides() {
+ s.T().Skip("Skipping test because default configuration overrides are present (this is a downstream fork)")
+ }
s.Run("Default configuration toolsets", func() {
s.InitMcpClient()
tools, err := s.ListTools(s.T().Context(), mcp.ListToolsRequest{})
@@ -82,6 +85,9 @@ func (s *ToolsetsSuite) TestDefaultToolsetsTools() {
}
func (s *ToolsetsSuite) TestDefaultToolsetsToolsInOpenShift() {
+ if configuration.HasDefaultOverrides() {
+ s.T().Skip("Skipping test because default configuration overrides are present (this is a downstream fork)")
+ }
s.Run("Default configuration toolsets in OpenShift", func() {
s.Handle(&test.InOpenShiftHandler{})
s.InitMcpClient()
@@ -100,6 +106,9 @@ func (s *ToolsetsSuite) TestDefaultToolsetsToolsInOpenShift() {
}
func (s *ToolsetsSuite) TestDefaultToolsetsToolsInMultiCluster() {
+ if configuration.HasDefaultOverrides() {
+ s.T().Skip("Skipping test because default configuration overrides are present (this is a downstream fork)")
+ }
s.Run("Default configuration toolsets in multi-cluster (with 11 clusters)", func() {
kubeconfig := s.Kubeconfig()
for i := 0; i < 10; i++ {
@@ -123,6 +132,9 @@ func (s *ToolsetsSuite) TestDefaultToolsetsToolsInMultiCluster() {
}
func (s *ToolsetsSuite) TestDefaultToolsetsToolsInMultiClusterEnum() {
+ if configuration.HasDefaultOverrides() {
+ s.T().Skip("Skipping test because default configuration overrides are present (this is a downstream fork)")
+ }
s.Run("Default configuration toolsets in multi-cluster (with 2 clusters)", func() {
kubeconfig := s.Kubeconfig()
// Add additional cluster to force multi-cluster behavior with enum parameter
diff --git a/pkg/toolsets/config/configuration.go b/pkg/toolsets/config/configuration.go
index 6b6b45d3..ab973da1 100644
--- a/pkg/toolsets/config/configuration.go
+++ b/pkg/toolsets/config/configuration.go
@@ -51,7 +51,6 @@ func initConfiguration() []api.ServerTool {
Title: "Configuration: View",
ReadOnlyHint: ptr.To(true),
DestructiveHint: ptr.To(false),
- IdempotentHint: ptr.To(false),
OpenWorldHint: ptr.To(true),
},
},
diff --git a/pkg/toolsets/core/events.go b/pkg/toolsets/core/events.go
index f10ff576..43ae1cc1 100644
--- a/pkg/toolsets/core/events.go
+++ b/pkg/toolsets/core/events.go
@@ -28,7 +28,6 @@ func initEvents() []api.ServerTool {
Title: "Events: List",
ReadOnlyHint: ptr.To(true),
DestructiveHint: ptr.To(false),
- IdempotentHint: ptr.To(false),
OpenWorldHint: ptr.To(true),
},
}, Handler: eventsList},
diff --git a/pkg/toolsets/core/namespaces.go b/pkg/toolsets/core/namespaces.go
index 71995d8c..2f2ee8fc 100644
--- a/pkg/toolsets/core/namespaces.go
+++ b/pkg/toolsets/core/namespaces.go
@@ -24,7 +24,6 @@ func initNamespaces(o internalk8s.Openshift) []api.ServerTool {
Title: "Namespaces: List",
ReadOnlyHint: ptr.To(true),
DestructiveHint: ptr.To(false),
- IdempotentHint: ptr.To(false),
OpenWorldHint: ptr.To(true),
},
}, Handler: namespacesList,
@@ -41,7 +40,6 @@ func initNamespaces(o internalk8s.Openshift) []api.ServerTool {
Title: "Projects: List",
ReadOnlyHint: ptr.To(true),
DestructiveHint: ptr.To(false),
- IdempotentHint: ptr.To(false),
OpenWorldHint: ptr.To(true),
},
}, Handler: projectsList,
diff --git a/pkg/toolsets/core/nodes.go b/pkg/toolsets/core/nodes.go
new file mode 100644
index 00000000..e42a8a98
--- /dev/null
+++ b/pkg/toolsets/core/nodes.go
@@ -0,0 +1,191 @@
+package core
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+
+ "github.com/google/jsonschema-go/jsonschema"
+ v1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/kubectl/pkg/metricsutil"
+ "k8s.io/utils/ptr"
+
+ "github.com/containers/kubernetes-mcp-server/pkg/api"
+ "github.com/containers/kubernetes-mcp-server/pkg/kubernetes"
+)
+
+func initNodes() []api.ServerTool {
+ return []api.ServerTool{
+ {Tool: api.Tool{
+ Name: "nodes_log",
+ Description: "Get logs from a Kubernetes node (kubelet, kube-proxy, or other system logs). This accesses node logs through the Kubernetes API proxy to the kubelet",
+ InputSchema: &jsonschema.Schema{
+ Type: "object",
+ Properties: map[string]*jsonschema.Schema{
+ "name": {
+ Type: "string",
+ Description: "Name of the node to get logs from",
+ },
+ "query": {
+ Type: "string",
+ Description: `query specifies service(s) or files from which to return logs (required). Example: "kubelet" to fetch kubelet logs, "/<log-file-name>" to fetch a specific log file from the node (e.g., "/var/log/kubelet.log" or "/var/log/kube-proxy.log")`,
+ },
+ "tailLines": {
+ Type: "integer",
+ Description: "Number of lines to retrieve from the end of the logs (Optional, 0 means all logs)",
+ Default: api.ToRawMessage(100),
+ Minimum: ptr.To(float64(0)),
+ },
+ },
+ Required: []string{"name", "query"},
+ },
+ Annotations: api.ToolAnnotations{
+ Title: "Node: Log",
+ ReadOnlyHint: ptr.To(true),
+ DestructiveHint: ptr.To(false),
+ OpenWorldHint: ptr.To(true),
+ },
+ }, Handler: nodesLog},
+ {Tool: api.Tool{
+ Name: "nodes_stats_summary",
+ Description: "Get detailed resource usage statistics from a Kubernetes node via the kubelet's Summary API. Provides comprehensive metrics including CPU, memory, filesystem, and network usage at the node, pod, and container levels. On systems with cgroup v2 and kernel 4.20+, also includes PSI (Pressure Stall Information) metrics that show resource pressure for CPU, memory, and I/O. See https://kubernetes.io/docs/reference/instrumentation/understand-psi-metrics/ for details on PSI metrics",
+ InputSchema: &jsonschema.Schema{
+ Type: "object",
+ Properties: map[string]*jsonschema.Schema{
+ "name": {
+ Type: "string",
+ Description: "Name of the node to get stats from",
+ },
+ },
+ Required: []string{"name"},
+ },
+ Annotations: api.ToolAnnotations{
+ Title: "Node: Stats Summary",
+ ReadOnlyHint: ptr.To(true),
+ DestructiveHint: ptr.To(false),
+ OpenWorldHint: ptr.To(true),
+ },
+ }, Handler: nodesStatsSummary},
+ {Tool: api.Tool{
+ Name: "nodes_top",
+ Description: "List the resource consumption (CPU and memory) as recorded by the Kubernetes Metrics Server for the specified Kubernetes Nodes or all nodes in the cluster",
+ InputSchema: &jsonschema.Schema{
+ Type: "object",
+ Properties: map[string]*jsonschema.Schema{
+ "name": {
+ Type: "string",
+ Description: "Name of the Node to get the resource consumption from (Optional, all Nodes if not provided)",
+ },
+ "label_selector": {
+ Type: "string",
+ Description: "Kubernetes label selector (e.g. 'node-role.kubernetes.io/worker=') to filter nodes by label (Optional, only applicable when name is not provided)",
+ Pattern: "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]",
+ },
+ },
+ },
+ Annotations: api.ToolAnnotations{
+ Title: "Nodes: Top",
+ ReadOnlyHint: ptr.To(true),
+ DestructiveHint: ptr.To(false),
+ IdempotentHint: ptr.To(true),
+ OpenWorldHint: ptr.To(true),
+ },
+ }, Handler: nodesTop},
+ }
+}
+
+func nodesLog(params api.ToolHandlerParams) (*api.ToolCallResult, error) {
+ name, ok := params.GetArguments()["name"].(string)
+ if !ok || name == "" {
+ return api.NewToolCallResult("", errors.New("failed to get node log, missing argument name")), nil
+ }
+ query, ok := params.GetArguments()["query"].(string)
+ if !ok || query == "" {
+ return api.NewToolCallResult("", errors.New("failed to get node log, missing argument query")), nil
+ }
+ tailLines := params.GetArguments()["tailLines"]
+ var tailInt int64
+ if tailLines != nil {
+ // Convert to int64, handling float64 (how JSON numbers decode) as well as native int and int64 values
+ switch v := tailLines.(type) {
+ case float64:
+ tailInt = int64(v)
+ case int:
+ tailInt = int64(v)
+ case int64:
+ tailInt = v
+ default:
+ return api.NewToolCallResult("", fmt.Errorf("failed to parse tailLines parameter: expected integer, got %T", tailLines)), nil
+ }
+ }
+ ret, err := params.NodesLog(params, name, query, tailInt)
+ if err != nil {
+ return api.NewToolCallResult("", fmt.Errorf("failed to get node log for %s: %v", name, err)), nil
+ } else if ret == "" {
+ ret = fmt.Sprintf("The node %s has not logged any message yet or the log file is empty", name)
+ }
+ return api.NewToolCallResult(ret, nil), nil
+}
+
+func nodesStatsSummary(params api.ToolHandlerParams) (*api.ToolCallResult, error) {
+ name, ok := params.GetArguments()["name"].(string)
+ if !ok || name == "" {
+ return api.NewToolCallResult("", errors.New("failed to get node stats summary, missing argument name")), nil
+ }
+ ret, err := params.NodesStatsSummary(params, name)
+ if err != nil {
+ return api.NewToolCallResult("", fmt.Errorf("failed to get node stats summary for %s: %v", name, err)), nil
+ }
+ return api.NewToolCallResult(ret, nil), nil
+}
+
+func nodesTop(params api.ToolHandlerParams) (*api.ToolCallResult, error) {
+ nodesTopOptions := kubernetes.NodesTopOptions{}
+ if v, ok := params.GetArguments()["name"].(string); ok {
+ nodesTopOptions.Name = v
+ }
+ if v, ok := params.GetArguments()["label_selector"].(string); ok {
+ nodesTopOptions.LabelSelector = v
+ }
+
+ nodeMetrics, err := params.NodesTop(params, nodesTopOptions)
+ if err != nil {
+ return api.NewToolCallResult("", fmt.Errorf("failed to get nodes top: %v", err)), nil
+ }
+
+ // Get the list of nodes to extract their allocatable resources
+ nodes, err := params.AccessControlClientset().Nodes()
+ if err != nil {
+ return api.NewToolCallResult("", fmt.Errorf("failed to get nodes client: %v", err)), nil
+ }
+
+ nodeList, err := nodes.List(params, metav1.ListOptions{
+ LabelSelector: nodesTopOptions.LabelSelector,
+ })
+ if err != nil {
+ return api.NewToolCallResult("", fmt.Errorf("failed to list nodes: %v", err)), nil
+ }
+
+ // Build availableResources map
+ availableResources := make(map[string]v1.ResourceList)
+ for _, n := range nodeList.Items {
+ availableResources[n.Name] = n.Status.Allocatable
+
+ // Handle swap if available
+ if n.Status.NodeInfo.Swap != nil && n.Status.NodeInfo.Swap.Capacity != nil {
+ swapCapacity := *n.Status.NodeInfo.Swap.Capacity
+ availableResources[n.Name]["swap"] = *resource.NewQuantity(swapCapacity, resource.BinarySI)
+ }
+ }
+
+ // Print the metrics
+ buf := new(bytes.Buffer)
+ printer := metricsutil.NewTopCmdPrinter(buf, true)
+ err = printer.PrintNodeMetrics(nodeMetrics.Items, availableResources, false, "")
+ if err != nil {
+ return api.NewToolCallResult("", fmt.Errorf("failed to print node metrics: %v", err)), nil
+ }
+
+ return api.NewToolCallResult(buf.String(), nil), nil
+}
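One subtlety in `nodesLog` above: tool arguments travel through `encoding/json`, which decodes any untyped JSON number as `float64`, so the `float64` branch of the `tailLines` switch is the one that fires in practice; the `int`/`int64` branches only matter for callers handing in native Go values. A quick standalone check:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var args map[string]any
	_ = json.Unmarshal([]byte(`{"tailLines": 100}`), &args)
	// encoding/json decodes untyped JSON numbers as float64, which is why
	// the handler must coerce tailLines before treating it as an int64.
	fmt.Printf("%T %v\n", args["tailLines"], args["tailLines"]) // float64 100
}
```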
diff --git a/pkg/toolsets/core/pods.go b/pkg/toolsets/core/pods.go
index 8744a974..78781332 100644
--- a/pkg/toolsets/core/pods.go
+++ b/pkg/toolsets/core/pods.go
@@ -33,7 +33,6 @@ func initPods() []api.ServerTool {
Title: "Pods: List",
ReadOnlyHint: ptr.To(true),
DestructiveHint: ptr.To(false),
- IdempotentHint: ptr.To(false),
OpenWorldHint: ptr.To(true),
},
}, Handler: podsListInAllNamespaces},
@@ -59,7 +58,6 @@ func initPods() []api.ServerTool {
Title: "Pods: List in Namespace",
ReadOnlyHint: ptr.To(true),
DestructiveHint: ptr.To(false),
- IdempotentHint: ptr.To(false),
OpenWorldHint: ptr.To(true),
},
}, Handler: podsListInNamespace},
@@ -84,7 +82,6 @@ func initPods() []api.ServerTool {
Title: "Pods: Get",
ReadOnlyHint: ptr.To(true),
DestructiveHint: ptr.To(false),
- IdempotentHint: ptr.To(false),
OpenWorldHint: ptr.To(true),
},
}, Handler: podsGet},
@@ -107,7 +104,6 @@ func initPods() []api.ServerTool {
},
Annotations: api.ToolAnnotations{
Title: "Pods: Delete",
- ReadOnlyHint: ptr.To(false),
DestructiveHint: ptr.To(true),
IdempotentHint: ptr.To(true),
OpenWorldHint: ptr.To(true),
@@ -177,9 +173,7 @@ func initPods() []api.ServerTool {
},
Annotations: api.ToolAnnotations{
Title: "Pods: Exec",
- ReadOnlyHint: ptr.To(false),
DestructiveHint: ptr.To(true), // Depending on the Pod's entrypoint, executing certain commands may kill the Pod
- IdempotentHint: ptr.To(false),
OpenWorldHint: ptr.To(true),
},
}, Handler: podsExec},
@@ -218,7 +212,6 @@ func initPods() []api.ServerTool {
Title: "Pods: Log",
ReadOnlyHint: ptr.To(true),
DestructiveHint: ptr.To(false),
- IdempotentHint: ptr.To(false),
OpenWorldHint: ptr.To(true),
},
}, Handler: podsLog},
@@ -249,9 +242,7 @@ func initPods() []api.ServerTool {
},
Annotations: api.ToolAnnotations{
Title: "Pods: Run",
- ReadOnlyHint: ptr.To(false),
DestructiveHint: ptr.To(false),
- IdempotentHint: ptr.To(false),
OpenWorldHint: ptr.To(true),
},
}, Handler: podsRun},
diff --git a/pkg/toolsets/core/resources.go b/pkg/toolsets/core/resources.go
index a3536f56..52a613b3 100644
--- a/pkg/toolsets/core/resources.go
+++ b/pkg/toolsets/core/resources.go
@@ -51,7 +51,6 @@ func initResources(o internalk8s.Openshift) []api.ServerTool {
Title: "Resources: List",
ReadOnlyHint: ptr.To(true),
DestructiveHint: ptr.To(false),
- IdempotentHint: ptr.To(false),
OpenWorldHint: ptr.To(true),
},
}, Handler: resourcesList},
@@ -84,7 +83,6 @@ func initResources(o internalk8s.Openshift) []api.ServerTool {
Title: "Resources: Get",
ReadOnlyHint: ptr.To(true),
DestructiveHint: ptr.To(false),
- IdempotentHint: ptr.To(false),
OpenWorldHint: ptr.To(true),
},
}, Handler: resourcesGet},
@@ -103,7 +101,6 @@ func initResources(o internalk8s.Openshift) []api.ServerTool {
},
Annotations: api.ToolAnnotations{
Title: "Resources: Create or Update",
- ReadOnlyHint: ptr.To(false),
DestructiveHint: ptr.To(true),
IdempotentHint: ptr.To(true),
OpenWorldHint: ptr.To(true),
@@ -136,7 +133,6 @@ func initResources(o internalk8s.Openshift) []api.ServerTool {
},
Annotations: api.ToolAnnotations{
Title: "Resources: Delete",
- ReadOnlyHint: ptr.To(false),
DestructiveHint: ptr.To(true),
IdempotentHint: ptr.To(true),
OpenWorldHint: ptr.To(true),
diff --git a/pkg/toolsets/core/toolset.go b/pkg/toolsets/core/toolset.go
index 9f88c7aa..dfd61f42 100644
--- a/pkg/toolsets/core/toolset.go
+++ b/pkg/toolsets/core/toolset.go
@@ -24,6 +24,7 @@ func (t *Toolset) GetTools(o internalk8s.Openshift) []api.ServerTool {
return slices.Concat(
initEvents(),
initNamespaces(o),
+ initNodes(),
initPods(),
initResources(o),
)
diff --git a/pkg/toolsets/helm/helm.go b/pkg/toolsets/helm/helm.go
index 0352cf60..646941f1 100644
--- a/pkg/toolsets/helm/helm.go
+++ b/pkg/toolsets/helm/helm.go
@@ -39,9 +39,8 @@ func initHelm() []api.ServerTool {
},
Annotations: api.ToolAnnotations{
Title: "Helm: Install",
- ReadOnlyHint: ptr.To(false),
DestructiveHint: ptr.To(false),
- IdempotentHint: ptr.To(false), // TODO: consider replacing implementation with equivalent to: helm upgrade --install
+ IdempotentHint: nil, // TODO: consider replacing implementation with equivalent to: helm upgrade --install
OpenWorldHint: ptr.To(true),
},
}, Handler: helmInstall},
@@ -65,7 +64,6 @@ func initHelm() []api.ServerTool {
Title: "Helm: List",
ReadOnlyHint: ptr.To(true),
DestructiveHint: ptr.To(false),
- IdempotentHint: ptr.To(false),
OpenWorldHint: ptr.To(true),
},
}, Handler: helmList},
@@ -88,7 +86,6 @@ func initHelm() []api.ServerTool {
},
Annotations: api.ToolAnnotations{
Title: "Helm: Uninstall",
- ReadOnlyHint: ptr.To(false),
DestructiveHint: ptr.To(true),
IdempotentHint: ptr.To(true),
OpenWorldHint: ptr.To(true),
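On the `TODO` kept alongside `Helm: Install` above: the idempotent variant it alludes to, `helm upgrade --install`, works in Helm's own CLI by checking the release history and then choosing install or upgrade. A rough sketch against Helm's v3 action API, with the handler's configuration, chart, and values assumed to be in scope:

```go
package helm

import (
	"helm.sh/helm/v3/pkg/action"
	"helm.sh/helm/v3/pkg/chart"
	"helm.sh/helm/v3/pkg/storage/driver"
)

// upgradeOrInstall mirrors `helm upgrade --install`: install when the release
// does not exist yet, upgrade otherwise. cfg, chrt, and values are assumed to
// come from the surrounding handler.
func upgradeOrInstall(cfg *action.Configuration, name, namespace string, chrt *chart.Chart, values map[string]interface{}) error {
	hist := action.NewHistory(cfg)
	hist.Max = 1
	if _, err := hist.Run(name); err == driver.ErrReleaseNotFound {
		install := action.NewInstall(cfg)
		install.ReleaseName = name
		install.Namespace = namespace
		_, err = install.Run(chrt, values)
		return err
	}
	upgrade := action.NewUpgrade(cfg)
	upgrade.Namespace = namespace
	_, err := upgrade.Run(name, chrt, values)
	return err
}
```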