diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 569597b6a..63d5b270f 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -16,6 +16,7 @@ body: - disk-buffering - gcp-auth-extension - gcp-resources + - ibm-mq-metrics - jfr-connection - jfr-events - jmx-metrics @@ -57,3 +58,10 @@ body: attributes: label: Additional context description: Any additional information you think may be relevant to this issue. + - type: dropdown + attributes: + label: Tip + description: This element is static, used to render a helpful sub-heading for end-users and community members to help prioritize issues. Please leave as is. + options: + - [React](https://github.blog/news-insights/product-news/add-reactions-to-pull-requests-issues-and-comments/) with 👍 to help prioritize this issue. Please use comments to provide useful context, avoiding `+1` or `me too`, to help us triage it. Learn more [here](https://opentelemetry.io/community/end-user/issue-participation/). + default: 0 diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index 59450b16f..45edd560b 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -16,6 +16,7 @@ body: - disk-buffering - gcp-auth-extension - gcp-resources + - ibm-mq-metrics - jfr-connection - jfr-events - jmx-metrics @@ -48,3 +49,10 @@ body: attributes: label: Additional context description: Add any other context or screenshots about the feature request here. + - type: dropdown + attributes: + label: Tip + description: This element is static, used to render a helpful sub-heading for end-users and community members to help prioritize issues. Please leave as is. + options: + - [React](https://github.blog/news-insights/product-news/add-reactions-to-pull-requests-issues-and-comments/) with 👍 to help prioritize this issue. 
Please use comments to provide useful context, avoiding `+1` or `me too`, to help us triage it. Learn more [here](https://opentelemetry.io/community/end-user/issue-participation/). + default: 0 diff --git a/.github.amrom.workers.devponent_owners.yml b/.github.amrom.workers.devponent_owners.yml index de4544864..6b74234ad 100644 --- a/.github.amrom.workers.devponent_owners.yml +++ b/.github.amrom.workers.devponent_owners.yml @@ -1,14 +1,14 @@ -# this file is used by .github/workflows/assign-reviewers.yml +# this file is used by .github/workflows/assign-reviewers.yml and .github/workflows/assign-issue-owners.yml # # NOTE component owners must be members of the GitHub OpenTelemetry organization # so that they can be added to @open-telemetry/java-contrib-triagers -# which in turn is required for them to be auto-assigned as reviewers by the automation +# which in turn is required for them to be auto-assigned as reviewers and issue assignees by the automation # # NOTE when updating this file, don't forget to update the README.md files in the associated # components also # # NOTE when adding/updating one of the component names, don't forget to update the associated -# `comp:*` labels +# `component:*` labels (used for both PR reviews and issue assignment) components: aws-resources: - wangzlei @@ -65,7 +65,7 @@ components: - LikeTheSalad - breedx-splk - jack-berg - prometheus-collector: + prometheus-client-bridge: - jkwatson resource-providers: - breedx-splk @@ -89,3 +89,7 @@ components: - sylvainjuge opamp-client: - LikeTheSalad + - jackshirazi + ibm-mq-metrics: + - breedx-splk + - atoulme diff --git a/.github/config/lychee.toml b/.github/config/lychee.toml new file mode 100644 index 000000000..071c66fd8 --- /dev/null +++ b/.github/config/lychee.toml @@ -0,0 +1,17 @@ +timeout = 30 +retry_wait_time = 5 +max_retries = 6 +max_concurrency = 4 + +# Check link anchors +include_fragments = true + +remap = [ + # workaround for 
https://github.com/lycheeverse/lychee/issues/1729 + "https://github.com/(.*?)/(.*?)/blob/(.*?)/(.*#.*)$ https://raw.githubusercontent.com/$1/$2/$3/$4" +] + +exclude = [ + # excluding links to pull requests and issues is done for performance + "^https://github.com/open-telemetry/opentelemetry-java-contrib/(issues|pull)/\\d+$", +] diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md new file mode 100644 index 000000000..d7f693363 --- /dev/null +++ b/.github/copilot-instructions.md @@ -0,0 +1,66 @@ +# Copilot Instructions for OpenTelemetry Java Contrib + +This repository provides observability instrumentation for Java applications. + +## Code Review Priorities + +### Style Guide Compliance + +**PRIORITY**: Verify that all code changes follow the [Style Guide](../docs/style-guide.md). Check: + +- Code formatting (auto-formatting, static imports, class organization) +- Java language conventions (`final` usage, `@Nullable` annotations, `Optional` usage) +- Performance constraints (hot path allocations) +- Implementation patterns (SPI registration, configuration conventions) +- Gradle conventions (Kotlin DSL, plugin usage, module naming) +- Documentation standards (README files, deprecation processes) + +### Critical Areas + +- **Public APIs**: Changes affect downstream users and require careful review +- **Performance**: Instrumentation must have minimal overhead +- **Thread Safety**: Ensure safe concurrent access patterns +- **Memory Management**: Prevent leaks and excessive allocations + +### Quality Standards + +- Proper error handling with appropriate logging levels +- OpenTelemetry specification and semantic convention compliance +- Resource cleanup and lifecycle management +- Comprehensive unit tests for new functionality + +### Test suites + +This project uses gradle 9 which requires specifying test classes and paths explicitly. 
+ +For example, this will NOT work because it registers a `Test` without specifying the test classes or paths: + +```kotlin +tasks.register("IntegrationTestUserCreds") { + dependsOn(tasks.shadowJar) + dependsOn(tasks.named("copyAgent")) + ... +} +``` + +This is fixed by specifying the test classes and classpath explicitly: + +```kotlin +tasks.register("IntegrationTestUserCreds") { + testClassesDirs = sourceSets.test.get().output.classesDirs + classpath = sourceSets.test.get().runtimeClasspath + + dependsOn(tasks.shadowJar) + dependsOn(tasks.named("copyAgent")) + ... +} +``` + +## Coding Agent Instructions + +When implementing changes or new features: + +1. Follow all [Style Guide](../docs/style-guide.md) conventions and the Code Review Priorities above +2. Run tests to ensure they still pass (use `./gradlew test` and `./gradlew integrationTest` as needed) +3. **Always run `./gradlew spotlessApply`** after making code changes to ensure proper formatting +4. Run markdown lint to ensure it still passes: `npx markdownlint-cli@0.45.0 -c .github/config/markdownlint.yml **/*.md` diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 0efdf7d3a..a7d6e7672 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -1,25 +1,43 @@ { $schema: 'https://docs.renovatebot.com/renovate-schema.json', extends: [ - 'config:recommended', - 'docker:pinDigests', - 'helpers:pinGitHubActionDigests', + 'config:best-practices', + 'helpers:pinGitHubActionDigestsToSemver', ], ignorePresets: [ ':ignoreModulesAndTests', // needed to keep maven-extension test pom files up-to-date + 'workarounds:javaLTSVersions', // Allow all Java major versions, not just LTS + ], + prHourlyLimit: 5, // we have a large number of parallel runners + labels: [ + 'dependencies', ], - prHourlyLimit: 5, packageRules: [ { - // this is to reduce the number of renovate PRs - matchManagers: [ - 'github-actions', - 'dockerfile', + // reduces the number of Renovate PRs + // (patch updates are typically 
non-breaking) + groupName: 'all patch versions', + matchUpdateTypes: [ + 'patch', + ], + schedule: [ + 'before 8am every weekday', ], - extends: [ - 'schedule:weekly', + }, + { + // avoids these Renovate PRs from trickling in throughout the week + // (consolidating the review process) + matchUpdateTypes: [ + 'minor', + 'major', + ], + schedule: [ + 'before 8am on Monday', + ], + matchPackageNames: [ + '!io.opentelemetry:**', + '!io.opentelemetry.*:**', ], - groupName: 'weekly update', }, { matchPackageNames: [ @@ -50,6 +68,17 @@ ], enabled: false, }, + { + // junit 6+ requires Java 17+ + matchPackageNames: [ + 'org.junit:**', + 'org.junit.jupiter:**', + ], + matchUpdateTypes: [ + 'major', + ], + enabled: false, + }, { // junit-pioneer 2+ requires Java 11+ matchPackageNames: [ @@ -67,7 +96,7 @@ ], enabled: false, matchPackageNames: [ - 'org.mockito:{/,}**', + 'org.mockito:**', ], }, { @@ -105,25 +134,45 @@ matchCurrentVersion: '3.5.0', enabled: false, matchPackageNames: [ - 'org.apache.maven:{/,}**', + 'org.apache.maven:**', ], }, { groupName: 'spotless packages', matchPackageNames: [ - 'com.diffplug.spotless{/,}**', + 'com.diffplug.spotless', + 'com.diffplug.spotless:**', ], }, { groupName: 'hipparchus packages', matchPackageNames: [ - 'org.hipparchus{/,}**', + 'org.hipparchus:**', ], }, { groupName: 'errorprone packages', matchPackageNames: [ - 'com.google.errorprone{/,}**', + 'com.google.errorprone:**', + ], + }, + { + groupName: 'jackson packages', + matchPackageNames: [ + 'com.fasterxml.jackson:**', + 'com.fasterxml.jackson.core:**', + ], + }, + { + groupName: 'develocity packages', + matchPackageNames: [ + 'com.gradle.develocity:**', + ], + }, + { + groupName: 'bouncycastle packages', + matchPackageNames: [ + 'org.bouncycastle:**', ], }, { @@ -133,7 +182,7 @@ ], enabled: false, matchPackageNames: [ - 'org.openjdk.jmc{/,}**', + 'org.openjdk.jmc:**', ], }, { @@ -144,7 +193,7 @@ matchCurrentVersion: '5.0.0', enabled: false, matchPackageNames: [ - 
'jakarta.servlet:{/,}**', + 'jakarta.servlet:**', ], }, { @@ -157,7 +206,7 @@ ], enabled: false, matchPackageNames: [ - 'org.springframework.boot{/,}**', + 'org.springframework.boot:**', ], }, ], @@ -165,12 +214,34 @@ { customType: 'regex', datasourceTemplate: 'npm', - fileMatch: [ - '^.github/workflows/', + managerFilePatterns: [ + '.github/workflows/**', ], matchStrings: [ 'npx (?[^@]+)@(?[^\\s]+)', ], }, + { + customType: 'regex', + datasourceTemplate: 'java-version', + managerFilePatterns: [ + '.github/workflows/**', + ], + matchStrings: [ + '(?\\d+) # renovate: datasource=java-version', + ], + depNameTemplate: 'java', + extractVersionTemplate: '^(?\\d+)', + }, + { + customType: 'regex', + datasourceTemplate: 'github-releases', + managerFilePatterns: [ + '**/build.gradle.kts', + ], + matchStrings: [ + '"https://github.com/(?[^/]+/[^/]+)/zipball/(?.+?)"', + ], + }, ], } diff --git a/.github/repository-settings.md b/.github/repository-settings.md index 0e3d9240c..22299364d 100644 --- a/.github/repository-settings.md +++ b/.github/repository-settings.md @@ -1,24 +1,12 @@ # Repository settings -Same -as [opentelemetry-java-instrumentation repository settings](https://github.com/open-telemetry/opentelemetry-java-instrumentation/blob/main/.github/repository-settings.md#repository-settings), -except for +This document describes any changes that have been made to the +settings in this repository outside the settings tracked in the +private admin repo. -- The rules for `gh-pages` and `cloudfoundry` branches are not relevant in this repository. +## Merge queue for `main` -and the enablement of merge queues below. - -## Merge queue - -Needs to be enabled using classic branch protection (instead of rule set) -because of our use of the classic branch protection "Restrict who can push to matching branches" -which otherwise will block the merge queue from merging to main. 
- -### Restrict branch creation - -- Additional exclusion for `gh-readonly-queue/main/pr-*` - -### Classic branch protection for `main` +[The admin repo doesn't currently support tracking merge queue settings.] - Require merge queue: CHECKED - Build concurrency: 5 diff --git a/.github/scripts/draft-change-log-entries.sh b/.github/scripts/draft-change-log-entries.sh index 845c92672..489c1fe4d 100755 --- a/.github/scripts/draft-change-log-entries.sh +++ b/.github/scripts/draft-change-log-entries.sh @@ -35,6 +35,7 @@ component_names["consistent-sampling/"]="Consistent sampling" component_names["disk-buffering/"]="Disk buffering" component_names["gcp-resources/"]="GCP resources" component_names["gcp-auth-extension/"]="GCP authentication extension" +component_names["ibm-mq-metrics/"]="IBM MQ metrics" component_names["inferred-spans/"]="Inferred spans" component_names["jfr-connection/"]="JFR connection" component_names["jfr-events/"]="JFR events" @@ -44,6 +45,7 @@ component_names["kafka-exporter/"]="Kafka exporter" component_names["maven-extension/"]="Maven extension" component_names["micrometer-meter-provider/"]="Micrometer MeterProvider" component_names["noop-api/"]="No-op API" +component_names["opamp-client/"]="OpAMP client" component_names["processors/"]="Telemetry processors" component_names["prometheus-client-bridge/"]="Prometheus client bridge" component_names["runtime-attach/"]="Runtime attach" diff --git a/.github/scripts/package-lock.json b/.github/scripts/package-lock.json new file mode 100644 index 000000000..f364e1106 --- /dev/null +++ b/.github/scripts/package-lock.json @@ -0,0 +1,27 @@ +{ + "name": "github-scripts", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "github-scripts", + "version": "1.0.0", + "dependencies": { + "yaml": "2.8.1" + } + }, + "node_modules/yaml": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.8.1.tgz", + "integrity": 
"sha512-lcYcMxX2PO9XMGvAJkJ3OsNMw+/7FKes7/hgerGUYWIoWu5j/+YQqcZr5JnPZWzOsEBgMbSbiSTn/dv/69Mkpw==", + "license": "ISC", + "bin": { + "yaml": "bin.mjs" + }, + "engines": { + "node": ">= 14.6" + } + } + } +} diff --git a/.github/scripts/package.json b/.github/scripts/package.json new file mode 100644 index 000000000..efca3b345 --- /dev/null +++ b/.github/scripts/package.json @@ -0,0 +1,9 @@ +{ + "//": "Dependencies for GitHub Actions workflows that use actions/github-script", + "name": "github-scripts", + "version": "1.0.0", + "private": true, + "dependencies": { + "yaml": "2.8.1" + } +} diff --git a/.github/scripts/update-version.sh b/.github/scripts/update-version.sh index 2ec064776..39873d0f1 100755 --- a/.github/scripts/update-version.sh +++ b/.github/scripts/update-version.sh @@ -10,3 +10,5 @@ fi sed -Ei "s/val stableVersion = \"[^\"]*\"/val stableVersion = \"$version\"/" version.gradle.kts sed -Ei "s/val alphaVersion = \"[^\"]*\"/val alphaVersion = \"$alpha_version\"/" version.gradle.kts + +sed -Ei "1 s/(Comparing source compatibility of [a-z-]+)-[0-9]+\.[0-9]+\.[0-9]+(-SNAPSHOT)?.jar/\1-$version.jar/" docs/apidiffs/current_vs_latest/*.txt diff --git a/.github/workflows/assign-issue-owners.yml b/.github/workflows/assign-issue-owners.yml new file mode 100644 index 000000000..7d7acba67 --- /dev/null +++ b/.github/workflows/assign-issue-owners.yml @@ -0,0 +1,78 @@ +--- +name: Assign issue owners + +on: + issues: + types: [labeled] + +permissions: + contents: read + +jobs: + assign-owners: + permissions: + contents: read + issues: write + runs-on: ubuntu-latest + if: startsWith(github.event.label.name, 'component:') + steps: + - name: Checkout repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + + - name: Install yaml dependency used below + run: npm install .github/scripts + + - name: Parse component label and assign owners + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 + with: + script: | + const 
fs = require('fs'); + const { parse } = require('yaml'); + + // Extract component name from label + const labelName = context.payload.label.name; + + if (!labelName.startsWith('component:')) { + core.setFailed('Label does not match expected pattern'); + return; + } + + const componentName = labelName.replace('component:', ''); + console.log(`Processing component: ${componentName}`); + + // Read and parse component_owners.yml + const yamlContent = fs.readFileSync('.github/component_owners.yml', 'utf8'); + const data = parse(yamlContent); + + if (!data || !data.components) { + core.setFailed('Invalid component_owners.yml structure'); + return; + } + + const components = data.components; + + if (!(componentName in components)) { + core.setFailed(`Component '${componentName}' not found in component_owners.yml`); + return; + } + + const owners = components[componentName]; + + if (!owners || owners.length === 0) { + core.setFailed(`No owners found for component '${componentName}'`); + return; + } + + console.log(`Found owners: ${owners.join(', ')}`); + + // Assign the issue to the owners + const issueNumber = context.payload.issue.number; + + await github.rest.issues.addAssignees({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issueNumber, + assignees: owners + }); + + console.log(`Successfully assigned issue #${issueNumber} to ${owners.join(', ')}`); diff --git a/.github/workflows/assign-reviewers.yml b/.github/workflows/assign-reviewers.yml index 84a7a77af..d1cf86ce8 100644 --- a/.github/workflows/assign-reviewers.yml +++ b/.github/workflows/assign-reviewers.yml @@ -18,6 +18,7 @@ jobs: pull-requests: write # for assigning reviewers runs-on: ubuntu-latest steps: - - uses: open-telemetry/assign-reviewers-action@ab8aca8056f3b5af18282b54baa57a852c47abf8 # main + - uses: dyladan/component-owners@58bd86e9814d23f1525d0a970682cead459fa783 # v0.1.0 with: config-file: .github/component_owners.yml + assign-owners: false diff 
--git a/.github/workflows/auto-spotless-apply.yml b/.github/workflows/auto-spotless-apply.yml new file mode 100644 index 000000000..b363b4977 --- /dev/null +++ b/.github/workflows/auto-spotless-apply.yml @@ -0,0 +1,94 @@ +name: Auto spotless apply +on: + workflow_run: + workflows: + - "Auto spotless check" + types: + - completed + +permissions: + contents: read + +jobs: + apply: + runs-on: ubuntu-latest + permissions: + contents: write + pull-requests: write + steps: + - name: Download patch + uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + with: + run-id: ${{ github.event.workflow_run.id }} + path: ${{ runner.temp }} + merge-multiple: true + github-token: ${{ github.token }} + + - id: unzip-patch + name: Unzip patch + working-directory: ${{ runner.temp }} + run: | + if [ -f patch ]; then + echo "exists=true" >> $GITHUB_OUTPUT + fi + + - uses: actions/create-github-app-token@67018539274d69449ef7c02e8e71183d1719ab42 # v2.1.4 + if: steps.unzip-patch.outputs.exists == 'true' + id: otelbot-token + with: + app-id: 1296620 + private-key: ${{ secrets.OTELBOT_JAVA_CONTRIB_PRIVATE_KEY }} + + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + if: steps.unzip-patch.outputs.exists == 'true' + with: + repository: "${{ github.event.workflow_run.head_repository.full_name }}" + ref: "${{ github.event.workflow_run.head_branch }}" + token: ${{ steps.otelbot-token.outputs.token }} + + - name: Use CLA approved github bot + if: steps.unzip-patch.outputs.exists == 'true' + # IMPORTANT do not call the .github/scripts/use-cla-approved-bot.sh + # since that script could have been compromised in the PR branch + run: | + git config user.name otelbot + git config user.email 197425009+otelbot@users.noreply.github.com + + - name: Apply patch and push + if: steps.unzip-patch.outputs.exists == 'true' + run: | + git apply "${{ runner.temp }}/patch" + git commit -a -m "./gradlew spotlessApply" + git push + + - id: get-pr + 
if: steps.unzip-patch.outputs.exists == 'true' + name: Get PR + env: + GH_REPO: ${{ github.repository }} + GH_TOKEN: ${{ github.token }} + PR_BRANCH: |- + ${{ + (github.event.workflow_run.head_repository.owner.login != github.event.workflow_run.repository.owner.login) + && format('{0}:{1}', github.event.workflow_run.head_repository.owner.login, github.event.workflow_run.head_branch) + || github.event.workflow_run.head_branch + }} + run: | + number=$(gh pr view "$PR_BRANCH" --json number --jq .number) + echo "number=$number" >> $GITHUB_OUTPUT + + - if: steps.unzip-patch.outputs.exists == 'true' && success() + env: + GH_REPO: ${{ github.repository }} + GH_TOKEN: ${{ steps.otelbot-token.outputs.token }} + PR_NUMBER: ${{ steps.get-pr.outputs.number }} + run: | + gh pr comment $PR_NUMBER --body "🔧 The result from spotlessApply was committed to the PR branch." + + - if: steps.unzip-patch.outputs.exists == 'true' && failure() + env: + GH_REPO: ${{ github.repository }} + GH_TOKEN: ${{ steps.otelbot-token.outputs.token }} + PR_NUMBER: ${{ steps.get-pr.outputs.number }} + run: | + gh pr comment $PR_NUMBER --body "❌ The result from spotlessApply could not be committed to the PR branch, see logs: $GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID." 
diff --git a/.github/workflows/auto-spotless-check.yml b/.github/workflows/auto-spotless-check.yml new file mode 100644 index 000000000..e5ec0e2e4 --- /dev/null +++ b/.github/workflows/auto-spotless-check.yml @@ -0,0 +1,53 @@ +name: Auto spotless check +on: + pull_request: + types: + - opened + - synchronize + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number }} + cancel-in-progress: true + +permissions: + contents: read + +jobs: + check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + + - name: Set up JDK for running Gradle + uses: actions/setup-java@dded0888837ed1f317902acf8a20df0ad188d165 # v5.0.0 + with: + distribution: temurin + java-version: 17 + + - name: Set up gradle + uses: gradle/actions/setup-gradle@4d9f0ba0025fe599b4ebab900eb7f3a1d93ef4c2 # v5.0.0 + with: + cache-read-only: true + + - name: Check out PR branch + env: + GH_TOKEN: ${{ github.token }} + run: gh pr checkout ${{ github.event.pull_request.number }} + + - name: Spotless + run: ./gradlew spotlessApply + + - id: create-patch + name: Create patch file + run: | + git diff > patch + if [ -s patch ]; then + echo "exists=true" >> "$GITHUB_OUTPUT" + fi + + - name: Upload patch file + if: steps.create-patch.outputs.exists == 'true' + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + path: patch + name: patch diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index e34c77489..a798378c6 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -12,7 +12,7 @@ permissions: jobs: backport: permissions: - contents: write # for Git to git push + contents: write # for git push to PR branch runs-on: ubuntu-latest steps: - run: | @@ -21,7 +21,7 @@ jobs: exit 1 fi - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: # 
history is needed to run git cherry-pick below fetch-depth: 0 @@ -29,7 +29,7 @@ jobs: - name: Use CLA approved bot run: .github/scripts/use-cla-approved-bot.sh - - uses: actions/create-github-app-token@3ff1caaa28b64c9cc276ce0a02e2ff584f3900c5 # v2.0.2 + - uses: actions/create-github-app-token@67018539274d69449ef7c02e8e71183d1719ab42 # v2.1.4 id: otelbot-token with: app-id: ${{ vars.OTELBOT_APP_ID }} @@ -48,6 +48,15 @@ jobs: git checkout -b $branch git cherry-pick $commit + + if git diff --name-only HEAD~1 HEAD | grep -q '^\.github/workflows/'; then + echo "::error::This PR contains changes to workflow files (.github/workflows/)." + echo "::error::Workflow files cannot be automatically backported because the standard" + echo "::error::GitHub token doesn't have the required 'workflow' write permission." + echo "::error::Please backport this PR manually." + exit 1 + fi + git push --set-upstream origin $branch gh pr create --title "[$GITHUB_REF_NAME] $title" \ --body "Clean cherry-pick of #$NUMBER to the \`$GITHUB_REF_NAME\` branch." 
\ diff --git a/.github/workflows/build-common.yml b/.github/workflows/build-common.yml new file mode 100644 index 000000000..20ea2f153 --- /dev/null +++ b/.github/workflows/build-common.yml @@ -0,0 +1,162 @@ +name: Reusable - Common + +on: + workflow_call: + inputs: + cache-read-only: + type: boolean + required: false + no-build-cache: + type: boolean + required: false + max-test-retries: + type: number + required: false + default: 0 + +permissions: + contents: read + +jobs: + spotless: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + + - name: Set up JDK for running Gradle + uses: actions/setup-java@dded0888837ed1f317902acf8a20df0ad188d165 # v5.0.0 + with: + distribution: temurin + java-version: 17 + + - name: Set up Gradle + uses: gradle/actions/setup-gradle@4d9f0ba0025fe599b4ebab900eb7f3a1d93ef4c2 # v5.0.0 + with: + cache-read-only: ${{ inputs.cache-read-only }} + + - name: Spotless + run: ./gradlew spotlessCheck ${{ inputs.no-build-cache && '--no-build-cache' || '' }} + + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + + - name: Set up JDK for running Gradle + uses: actions/setup-java@dded0888837ed1f317902acf8a20df0ad188d165 # v5.0.0 + with: + distribution: temurin + java-version: 17 + + - name: Set up Gradle + uses: gradle/actions/setup-gradle@4d9f0ba0025fe599b4ebab900eb7f3a1d93ef4c2 # v5.0.0 + with: + cache-read-only: ${{ inputs.cache-read-only }} + + - name: Build + run: ./gradlew build -x spotlessCheck -x test ${{ inputs.no-build-cache && '--no-build-cache' || '' }} + + - name: Check for jApiCmp diffs + # The jApiCmp diff compares current to latest, which isn't appropriate for release branches + if: ${{ !startsWith(github.ref_name, 'release/') && !startsWith(github.base_ref, 'release/') }} + run: | + # need to "git add" in case any generated files did not already exist + git add docs/apidiffs + if git diff --cached --quiet 
+ then + echo "No diff detected." + else + echo "Diff detected - did you run './gradlew jApiCmp'?" + git diff --cached --name-only + git diff --cached + exit 1 + fi + + test: + name: Test + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: + - ubuntu-latest + - windows-latest + test-java-version: + - 8 + - 11 + - 17 + - 21 + - 25 # renovate: datasource=java-version + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + + - id: setup-java-test + name: Set up Java ${{ matrix.test-java-version }} for tests + uses: actions/setup-java@dded0888837ed1f317902acf8a20df0ad188d165 # v5.0.0 + with: + distribution: temurin + java-version: ${{ matrix.test-java-version }} + + - id: setup-java + name: Set up Java for build + uses: actions/setup-java@dded0888837ed1f317902acf8a20df0ad188d165 # v5.0.0 + with: + distribution: temurin + java-version: 17 + + - name: Set up Gradle + uses: gradle/actions/setup-gradle@4d9f0ba0025fe599b4ebab900eb7f3a1d93ef4c2 # v5.0.0 + with: + cache-read-only: ${{ inputs.cache-read-only }} + + - name: Test + run: > + ./gradlew test + "-PtestJavaVersion=${{ matrix.test-java-version }}" + "-Porg.gradle.java.installations.paths=${{ steps.setup-java-test.outputs.path }}" + "-Porg.gradle.java.installations.auto-download=false" + "-PmaxTestRetries=${{ inputs.max-test-retries }}" + ${{ inputs.no-build-cache && '--no-build-cache' || '' }} + + - name: Build scan + if: ${{ !cancelled() && hashFiles('build-scan.txt') != '' }} + run: cat build-scan.txt + + integration-test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + + - name: Set up JDK for running Gradle + uses: actions/setup-java@dded0888837ed1f317902acf8a20df0ad188d165 # v5.0.0 + with: + distribution: temurin + java-version: 17 + + - name: Set up Gradle + uses: gradle/actions/setup-gradle@4d9f0ba0025fe599b4ebab900eb7f3a1d93ef4c2 # v5.0.0 + with: + cache-read-only: ${{ inputs.cache-read-only 
}} + + - name: Integration test + run: ./gradlew integrationTest "-PmaxTestRetries=${{ inputs.max-test-retries }}" ${{ inputs.no-build-cache && '--no-build-cache' || '' }} + + - name: Build scan + if: ${{ !cancelled() && hashFiles('build-scan.txt') != '' }} + run: cat build-scan.txt + + - name: Save integration test results + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + if: always() + with: + name: integration-test-results + path: jmx-metrics/build/reports/tests/integrationTest + + markdown-lint-check: + uses: ./.github/workflows/reusable-markdown-lint.yml + + misspell-check: + uses: ./.github/workflows/reusable-misspell-check.yml + + shell-script-check: + uses: ./.github/workflows/reusable-shell-script-check.yml diff --git a/.github/workflows/build-daily.yml b/.github/workflows/build-daily.yml new file mode 100644 index 000000000..7704cab52 --- /dev/null +++ b/.github/workflows/build-daily.yml @@ -0,0 +1,35 @@ +name: Build Daily + +on: + workflow_dispatch: + schedule: + # Run daily at 7:30 AM UTC + - cron: '30 7 * * *' + +permissions: + contents: read + +jobs: + common: + uses: ./.github/workflows/build-common.yml + with: + no-build-cache: true + + link-check: + uses: ./.github/workflows/reusable-link-check.yml + + workflow-notification: + permissions: + contents: read + issues: write + if: always() + needs: + - common + - link-check + uses: ./.github/workflows/reusable-workflow-notification.yml + with: + success: >- + ${{ + needs.common.result == 'success' && + needs.link-check.result == 'success' + }} diff --git a/.github/workflows/build-pull-request.yml b/.github/workflows/build-pull-request.yml new file mode 100644 index 000000000..d41d87f5f --- /dev/null +++ b/.github/workflows/build-pull-request.yml @@ -0,0 +1,34 @@ +name: Build Pull Request + +on: + pull_request: + merge_group: + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + 
cancel-in-progress: true + +jobs: + common: + uses: ./.github/workflows/build-common.yml + with: + cache-read-only: true + # retry in merge queue to avoid unnecessary failures + max-test-retries: ${{ github.event_name == 'merge_group' && 5 || 0 }} + + link-check: + uses: ./.github/workflows/reusable-link-check.yml + + required-status-check: + if: always() + needs: + - common + - link-check # wait for link check to complete, but don't require it to pass for merging + runs-on: ubuntu-latest + steps: + # The reusable workflow success depends on all its jobs passing + - if: needs.common.result != 'success' + run: exit 1 # fail diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 787afe869..c16ad6cd8 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -5,176 +5,40 @@ on: branches: - main - release/* - pull_request: - merge_group: - workflow_dispatch: permissions: contents: read -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} - cancel-in-progress: true - jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - name: Set up JDK for running Gradle - uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4.7.1 - with: - distribution: temurin - java-version: 17 - - - name: Set up gradle - uses: gradle/actions/setup-gradle@06832c7b30a0129d7fb559bcc6e43d26f6374244 # v4.3.1 - with: - cache-read-only: ${{ github.event_name == 'pull_request' }} - - name: Gradle build and test - run: ./gradlew build -x test - - test: - name: test (${{ matrix.test-java-version }}) - runs-on: ubuntu-latest - strategy: - matrix: - test-java-version: - - 8 - - 11 - - 17 - - 21 - - 23 - fail-fast: false - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - id: setup-test-java - name: Set up JDK ${{ matrix.test-java-version }} for running tests - uses: 
actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4.7.1 - with: - # using zulu because new releases get published quickly - distribution: zulu - java-version: ${{ matrix.test-java-version }} - - - name: Set up JDK for running Gradle - uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4.7.1 - with: - distribution: temurin - java-version: 17 - - - name: Set up gradle - uses: gradle/actions/setup-gradle@06832c7b30a0129d7fb559bcc6e43d26f6374244 # v4.3.1 - with: - cache-read-only: ${{ github.event_name == 'pull_request' }} - - name: Gradle test - run: > - ./gradlew test - -PtestJavaVersion=${{ matrix.test-java-version }} - -Porg.gradle.java.installations.paths=${{ steps.setup-test-java.outputs.path }} - -Porg.gradle.java.installations.auto-download=false - - integration-test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - name: Set up JDK for running Gradle - uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4.7.1 - with: - distribution: temurin - java-version: 17 - - - name: Set up gradle - uses: gradle/actions/setup-gradle@06832c7b30a0129d7fb559bcc6e43d26f6374244 # v4.3.1 - with: - cache-read-only: ${{ github.event_name == 'pull_request' }} + common: + uses: ./.github/workflows/build-common.yml - - name: Integration test - run: ./gradlew integrationTest - - - name: Save integration test results - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - if: always() - with: - name: integration-test-results - path: jmx-metrics/build/reports/tests/integrationTest - - markdown-link-check: - uses: ./.github/workflows/reusable-markdown-link-check.yml - - markdown-lint-check: - uses: ./.github/workflows/reusable-markdown-lint.yml - - misspell-check: - uses: ./.github/workflows/reusable-misspell-check.yml - - shell-script-check: - uses: ./.github/workflows/reusable-shell-script-check.yml + # Link check is disabled for push events to 
avoid unnecessary CI failures + # (these failures will instead be captured by the daily scheduled run) + # and for release branches to avoid unnecessary maintenance if external links break publish-snapshots: - # the condition is on the steps below instead of here on the job, because skipping the job - # causes the job to show up as canceled in the GitHub UI which prevents the PR build section - # from collapsing when everything (else) is green - # - # and the name is updated when the steps below are skipped which makes what's happening clearer - # in the GitHub UI - # - # note: the condition below has to be written so that '' is last since it resolves to false - # and so would not short-circuit if used in the second-last position - name: publish-snapshots${{ (github.ref_name != 'main' || github.repository != 'open-telemetry/opentelemetry-java-contrib') && ' (skipped)' || '' }} needs: - # intentionally not blocking snapshot publishing on markdown-link-check or misspell-check - - build - - integration-test + - common runs-on: ubuntu-latest + # skipping release branches because the versions in those branches are not snapshots + if: github.ref_name == 'main' && github.repository == 'open-telemetry/opentelemetry-java-contrib' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Set up JDK for running Gradle - uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4.7.1 + uses: actions/setup-java@dded0888837ed1f317902acf8a20df0ad188d165 # v5.0.0 with: distribution: temurin java-version: 17 - name: Set up gradle - uses: gradle/actions/setup-gradle@06832c7b30a0129d7fb559bcc6e43d26f6374244 # v4.3.1 - # skipping release branches because the versions in those branches are not snapshots - # (also this skips pull requests) - if: ${{ github.ref_name == 'main' && github.repository == 'open-telemetry/opentelemetry-java-contrib' }} + uses: 
gradle/actions/setup-gradle@4d9f0ba0025fe599b4ebab900eb7f3a1d93ef4c2 # v5.0.0 + - name: Build and publish snapshots - if: ${{ github.ref_name == 'main' && github.repository == 'open-telemetry/opentelemetry-java-contrib' }} run: ./gradlew assemble publishToSonatype env: SONATYPE_USER: ${{ secrets.SONATYPE_USER }} SONATYPE_KEY: ${{ secrets.SONATYPE_KEY }} GPG_PRIVATE_KEY: ${{ secrets.GPG_PRIVATE_KEY }} GPG_PASSWORD: ${{ secrets.GPG_PASSWORD }} - - required-status-check: - if: (github.event_name == 'pull_request' || github.event_name == 'merge_group') && always() - needs: - - build - - test - - integration-test - - markdown-lint-check - - misspell-check - - shell-script-check - runs-on: ubuntu-latest - steps: - # only the build and test checks are required for release branch PRs in order - # to avoid any unnecessary release branch maintenance (especially for patches) - - if: | - needs.build.result != 'success' || - needs.test.result != 'success' || - needs.integration-test.result != 'success' || - ( - !startsWith(github.base_ref, 'release/') && - ( - needs.markdown-lint-check.result != 'success' || - needs.misspell-check.result != 'success' || - needs.shell-script-check.result != 'success' - ) - ) - run: exit 1 # fail diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index ff49bce2f..3b845ff19 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -1,17 +1,20 @@ name: CodeQL on: - push: + pull_request: branches: - main - release/* - pull_request: # TODO (trask) adding this to the merge queue causes the merge queue to fail # see related issues # - https://github.com/github/codeql-action/issues/1572 # - https://github.com/github/codeql-action/issues/1537 # - https://github.com/github/codeql-action/issues/2691 # merge_group: + push: + branches: + - main + - release/* schedule: - cron: "29 13 * * 2" # weekly at 13:29 UTC on Tuesday @@ -20,36 +23,48 @@ permissions: jobs: analyze: + 
name: Analyze (${{ matrix.language }}) permissions: contents: read actions: read # for github/codeql-action/init to get workflow details security-events: write # for github/codeql-action/analyze to upload SARIF results + strategy: + fail-fast: false + matrix: + include: + - language: actions + - language: java runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Set up Java 17 - uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4.7.1 + if: matrix.language == 'java' + uses: actions/setup-java@dded0888837ed1f317902acf8a20df0ad188d165 # v5.0.0 with: distribution: temurin java-version: 17 - name: Set up gradle - uses: gradle/actions/setup-gradle@06832c7b30a0129d7fb559bcc6e43d26f6374244 # v4.3.1 + if: matrix.language == 'java' + uses: gradle/actions/setup-gradle@4d9f0ba0025fe599b4ebab900eb7f3a1d93ef4c2 # v5.0.0 - name: Initialize CodeQL - uses: github/codeql-action/init@45775bd8235c68ba998cffa5171334d58593da47 # v3.28.15 + uses: github/codeql-action/init@a8d1ac45b9a34d11fe398d5503176af0d06b303e # v3.30.7 with: - languages: java, actions - # using "latest" helps to keep up with the latest Kotlin support + languages: ${{ matrix.language }} + # using "linked" helps to keep up with the latest Kotlin support # see https://github.com/github/codeql-action/issues/1555#issuecomment-1452228433 - tools: latest + tools: linked - name: Assemble + if: matrix.language == 'java' # --no-build-cache is required for codeql to analyze all modules # --no-daemon is required for codeql to observe the compilation # (see https://docs.github.com/en/code-security/codeql-cli/getting-started-with-the-codeql-cli/preparing-your-code-for-codeql-analysis#specifying-build-commands) run: ./gradlew assemble --no-build-cache --no-daemon - name: Perform CodeQL analysis - uses: 
github/codeql-action/analyze@45775bd8235c68ba998cffa5171334d58593da47 # v3.28.15 + uses: github/codeql-action/analyze@a8d1ac45b9a34d11fe398d5503176af0d06b303e # v3.30.7 + with: + category: "/language:${{matrix.language}}" diff --git a/.github/workflows/copilot-setup-steps.yml b/.github/workflows/copilot-setup-steps.yml new file mode 100644 index 000000000..e167d7c5e --- /dev/null +++ b/.github/workflows/copilot-setup-steps.yml @@ -0,0 +1,34 @@ +# Custom setup steps for GitHub Copilot coding agent to speed up Copilot's work on coding tasks +name: "Copilot Setup Steps" + +on: + pull_request: + paths: + - .github/workflows/copilot-setup-steps.yml + push: + paths: + - .github/workflows/copilot-setup-steps.yml + workflow_dispatch: + +permissions: + contents: read + +jobs: + copilot-setup-steps: # Job name required by GitHub Copilot coding agent + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + + - name: Set up JDK for running Gradle + uses: actions/setup-java@dded0888837ed1f317902acf8a20df0ad188d165 # v5.0.0 + with: + distribution: temurin + java-version: 17 + + - name: Set up gradle + uses: gradle/actions/setup-gradle@4d9f0ba0025fe599b4ebab900eb7f3a1d93ef4c2 # v5.0.0 + + - name: Build project and download dependencies + run: ./gradlew build -x test diff --git a/.github/workflows/fossa.yml b/.github/workflows/fossa.yml index 03d4e5684..4f2c7d5d6 100644 --- a/.github/workflows/fossa.yml +++ b/.github/workflows/fossa.yml @@ -12,9 +12,9 @@ jobs: fossa: runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - - uses: fossas/fossa-action@c0a7d013f84c8ee5e910593186598625513cc1e4 # v1.6.0 + - uses: fossas/fossa-action@3ebcea1862c6ffbd5cf1b4d0bd6b3fe7bd6f2cac # v1.7.0 with: api-key: ${{secrets.FOSSA_API_KEY}} team: OpenTelemetry diff --git 
a/.github/workflows/gradle-wrapper-validation.yml b/.github/workflows/gradle-wrapper-validation.yml index 658fbac69..d2e124e7d 100644 --- a/.github/workflows/gradle-wrapper-validation.yml +++ b/.github/workflows/gradle-wrapper-validation.yml @@ -15,6 +15,6 @@ jobs: gradle-wrapper-validation: runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - - uses: gradle/actions/wrapper-validation@06832c7b30a0129d7fb559bcc6e43d26f6374244 # v4.3.1 + - uses: gradle/actions/wrapper-validation@4d9f0ba0025fe599b4ebab900eb7f3a1d93ef4c2 # v5.0.0 diff --git a/.github/workflows/issue-management-feedback-label.yml b/.github/workflows/issue-management-feedback-label.yml index 35fa82926..49db5efb5 100644 --- a/.github/workflows/issue-management-feedback-label.yml +++ b/.github/workflows/issue-management-feedback-label.yml @@ -12,12 +12,13 @@ jobs: permissions: contents: read issues: write + pull-requests: write if: > contains(github.event.issue.labels.*.name, 'needs author feedback') && github.event.comment.user.login == github.event.issue.user.login runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Remove labels env: diff --git a/.github/workflows/issue-management-stale-action.yml b/.github/workflows/issue-management-stale-action.yml index 483df9b15..1cb3de9a5 100644 --- a/.github/workflows/issue-management-stale-action.yml +++ b/.github/workflows/issue-management-stale-action.yml @@ -4,6 +4,7 @@ on: schedule: # hourly at minute 23 - cron: "23 * * * *" + workflow_dispatch: permissions: contents: read @@ -16,21 +17,52 @@ jobs: pull-requests: write # for actions/stale to close stale PRs runs-on: ubuntu-latest steps: - - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0 + # Action #1: Handle issues/PRs 
awaiting author feedback + # - After 7 days inactive: Adds "stale" label + warning comment + # - After 7 more days inactive: Closes + - uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0 with: - repo-token: ${{ secrets.GITHUB_TOKEN }} + only-labels: "needs author feedback" days-before-stale: 7 days-before-close: 7 - only-labels: "needs author feedback" stale-issue-label: stale stale-issue-message: > - This has been automatically marked as stale because it has been marked - as needing author feedback and has not had any activity for 7 days. - It will be closed automatically if there is no response from the author - within 7 additional days from this comment. + This issue has been labeled as stale due to lack of activity and needing author feedback. + It will be automatically closed if there is no further activity over the next 7 days. + stale-pr-label: stale + stale-pr-message: > + This PR has been labeled as stale due to lack of activity and needing author feedback. + It will be automatically closed if there is no further activity over the next 7 days. + + # Action #2: Close old enhancement requests + # - Targets: Issues with "enhancement" label (but NOT "needs author feedback") + # - After 365 days inactive: Adds "stale" label + closes immediately (no warning period) + # - Skips: Issues with "needs author feedback" to avoid conflicts with Action #1 + - uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0 + with: + only-labels: "enhancement" + # Skip issues that need author feedback (handled by the first action with 7+7 day policy) + exempt-issue-labels: "needs author feedback" + days-before-pr-stale: -1 + days-before-pr-close: -1 + days-before-issue-stale: 365 + days-before-issue-close: 0 + stale-issue-label: stale + close-issue-message: > + Since there has been no activity on this enhancement for the past year we are closing it to help maintain our backlog. 
+ Anyone who would like to work on it is still welcome to do so, and we can re-open it at that time. + + # Action #3: Handle stale PRs + # - After 90 days inactive: Adds "stale" label + warning comment + # - After 14 more days inactive: Closes + - uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0 + with: + days-before-issue-stale: -1 + days-before-issue-close: -1 + days-before-pr-stale: 90 + days-before-pr-close: 14 stale-pr-label: stale stale-pr-message: > - This has been automatically marked as stale because it has been marked - as needing author feedback and has not had any activity for 7 days. - It will be closed automatically if there is no response from the author - within 7 additional days from this comment. + This PR has been labeled as stale due to lack of activity. + It will be automatically closed if there is no further activity over the next 14 days. + exempt-draft-pr: false diff --git a/.github/workflows/ossf-scorecard.yml b/.github/workflows/ossf-scorecard.yml index 886c27e3e..c7fc5ab9d 100644 --- a/.github/workflows/ossf-scorecard.yml +++ b/.github/workflows/ossf-scorecard.yml @@ -19,12 +19,22 @@ jobs: # Needed for GitHub OIDC token if publish_results is true id-token: write steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: persist-credentials: false - - uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1 + - uses: actions/create-github-app-token@67018539274d69449ef7c02e8e71183d1719ab42 # v2.1.4 + id: create-token with: + # analyzing classic branch protections requires a token with admin read permissions + # see https://github.com/ossf/scorecard-action/blob/main/docs/authentication/fine-grained-auth-token.md + # and https://github.com/open-telemetry/community/issues/2769 + app-id: ${{ vars.OSSF_SCORECARD_APP_ID }} + private-key: ${{ 
secrets.OSSF_SCORECARD_PRIVATE_KEY }} + + - uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # v2.4.3 + with: + repo_token: ${{ steps.create-token.outputs.token }} results_file: results.sarif results_format: sarif publish_results: true @@ -42,6 +52,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard (optional). # Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@45775bd8235c68ba998cffa5171334d58593da47 # v3.28.15 + uses: github/codeql-action/upload-sarif@a8d1ac45b9a34d11fe398d5503176af0d06b303e # v3.30.7 with: sarif_file: results.sarif diff --git a/.github/workflows/owasp-dependency-check-daily.yml b/.github/workflows/owasp-dependency-check-daily.yml index dd6708163..c8ddc6b6c 100644 --- a/.github/workflows/owasp-dependency-check-daily.yml +++ b/.github/workflows/owasp-dependency-check-daily.yml @@ -15,10 +15,10 @@ jobs: analyze: runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Set up JDK for running Gradle - uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4.7.1 + uses: actions/setup-java@dded0888837ed1f317902acf8a20df0ad188d165 # v5.0.0 with: distribution: temurin java-version: 17 @@ -27,7 +27,7 @@ jobs: run: | sed -i "s/org.gradle.jvmargs=/org.gradle.jvmargs=-Xmx3g /" gradle.properties - - uses: gradle/actions/setup-gradle@06832c7b30a0129d7fb559bcc6e43d26f6374244 # v4.3.1 + - uses: gradle/actions/setup-gradle@4d9f0ba0025fe599b4ebab900eb7f3a1d93ef4c2 # v5.0.0 - run: ./gradlew dependencyCheckAnalyze env: diff --git a/.github/workflows/prepare-patch-release.yml b/.github/workflows/prepare-patch-release.yml index 050fc5a7f..70bfd208a 100644 --- a/.github/workflows/prepare-patch-release.yml +++ b/.github/workflows/prepare-patch-release.yml @@ -11,7 +11,7 @@ jobs: 
contents: write # for Git to git push runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - run: | if [[ ! $GITHUB_REF_NAME =~ ^release/v[0-9]+\.[0-9]+\.x$ ]]; then @@ -47,7 +47,7 @@ jobs: - name: Use CLA approved bot run: .github/scripts/use-cla-approved-bot.sh - - uses: actions/create-github-app-token@3ff1caaa28b64c9cc276ce0a02e2ff584f3900c5 # v2.0.2 + - uses: actions/create-github-app-token@67018539274d69449ef7c02e8e71183d1719ab42 # v2.1.4 id: otelbot-token with: app-id: ${{ vars.OTELBOT_APP_ID }} diff --git a/.github/workflows/prepare-release-branch.yml b/.github/workflows/prepare-release-branch.yml index 1e2a00c60..58c5dc8bb 100644 --- a/.github/workflows/prepare-release-branch.yml +++ b/.github/workflows/prepare-release-branch.yml @@ -9,7 +9,7 @@ jobs: prereqs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Verify prerequisites run: | @@ -30,7 +30,7 @@ jobs: needs: - prereqs steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Create release branch run: | @@ -59,7 +59,7 @@ jobs: - name: Use CLA approved bot run: .github/scripts/use-cla-approved-bot.sh - - uses: actions/create-github-app-token@3ff1caaa28b64c9cc276ce0a02e2ff584f3900c5 # v2.0.2 + - uses: actions/create-github-app-token@67018539274d69449ef7c02e8e71183d1719ab42 # v2.1.4 id: otelbot-token with: app-id: ${{ vars.OTELBOT_APP_ID }} @@ -87,7 +87,7 @@ jobs: needs: - prereqs steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Set environment variables run: | @@ -116,7 +116,7 @@ jobs: - name: Use CLA 
approved bot run: .github/scripts/use-cla-approved-bot.sh - - uses: actions/create-github-app-token@3ff1caaa28b64c9cc276ce0a02e2ff584f3900c5 # v2.0.2 + - uses: actions/create-github-app-token@67018539274d69449ef7c02e8e71183d1719ab42 # v2.1.4 id: otelbot-token with: app-id: ${{ vars.OTELBOT_APP_ID }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 244b009d5..3ef09e0a2 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,64 +1,25 @@ name: Release on: workflow_dispatch: + inputs: + already-published: + description: 'Skip publishing, download artifacts from Maven Central instead' + default: false + type: boolean permissions: contents: read jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - name: Set up JDK for running Gradle - uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4.7.1 - with: - distribution: temurin - java-version: 17 - - - name: Set up gradle - uses: gradle/actions/setup-gradle@06832c7b30a0129d7fb559bcc6e43d26f6374244 # v4.3.1 - - name: Gradle build - run: ./gradlew build - - - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - name: Save unit test results - if: always() - with: - name: test-results - path: jmx-metrics/build/reports/tests/test - - integration-test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - name: Set up JDK for running Gradle - uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4.7.1 - with: - distribution: temurin - java-version: 17 - - - name: Set up gradle - uses: gradle/actions/setup-gradle@06832c7b30a0129d7fb559bcc6e43d26f6374244 # v4.3.1 - - name: Integration test - run: ./gradlew integrationTest - - - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - name: Save integration test results - if: always() - with: - name: 
integration-test-results - path: jmx-metrics/build/reports/tests/integrationTest + common: + uses: ./.github/workflows/build-common.yml release: permissions: contents: write # for creating the release runs-on: ubuntu-latest needs: - - build - - integration-test + - common outputs: version: ${{ steps.create-github-release.outputs.version }} steps: @@ -68,7 +29,7 @@ jobs: exit 1 fi - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Set environment variables run: | @@ -97,7 +58,7 @@ jobs: # check out main branch to verify there won't be problems with merging the change log # at the end of this workflow - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: ref: main @@ -112,20 +73,22 @@ jobs: fi # back to the release branch - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: # tags are needed for the generate-release-contributors.sh script fetch-depth: 0 - name: Set up JDK for running Gradle - uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4.7.1 + uses: actions/setup-java@dded0888837ed1f317902acf8a20df0ad188d165 # v5.0.0 with: distribution: temurin java-version: 17 - name: Set up gradle - uses: gradle/actions/setup-gradle@06832c7b30a0129d7fb559bcc6e43d26f6374244 # v4.3.1 + uses: gradle/actions/setup-gradle@4d9f0ba0025fe599b4ebab900eb7f3a1d93ef4c2 # v5.0.0 + - name: Build and publish artifacts + if: ${{ !inputs.already-published }} run: ./gradlew assemble publishToSonatype closeAndReleaseSonatypeStagingRepository env: SONATYPE_USER: ${{ secrets.SONATYPE_USER }} @@ -133,6 +96,21 @@ jobs: GPG_PRIVATE_KEY: ${{ secrets.GPG_PRIVATE_KEY }} GPG_PASSWORD: ${{ secrets.GPG_PASSWORD }} + - name: Download artifacts from Maven Central (when 
already published) + if: ${{ inputs.already-published }} + run: | + mkdir -p jmx-metrics/build/libs + mkdir -p jmx-scraper/build/libs + + curl -L -o jmx-metrics/build/libs/opentelemetry-jmx-metrics-$VERSION-alpha.jar \ + "https://repo1.maven.org/maven2/io/opentelemetry/contrib/opentelemetry-jmx-metrics/$VERSION-alpha/opentelemetry-jmx-metrics-$VERSION-alpha.jar" + curl -L -o jmx-metrics/build/libs/opentelemetry-jmx-metrics-$VERSION-alpha.jar.asc \ + "https://repo1.maven.org/maven2/io/opentelemetry/contrib/opentelemetry-jmx-metrics/$VERSION-alpha/opentelemetry-jmx-metrics-$VERSION-alpha.jar.asc" + curl -L -o jmx-scraper/build/libs/opentelemetry-jmx-scraper-$VERSION-alpha.jar \ + "https://repo1.maven.org/maven2/io/opentelemetry/contrib/opentelemetry-jmx-scraper/$VERSION-alpha/opentelemetry-jmx-scraper-$VERSION-alpha.jar" + curl -L -o jmx-scraper/build/libs/opentelemetry-jmx-scraper-$VERSION-alpha.jar.asc \ + "https://repo1.maven.org/maven2/io/opentelemetry/contrib/opentelemetry-jmx-scraper/$VERSION-alpha/opentelemetry-jmx-scraper-$VERSION-alpha.jar.asc" + - name: Generate release notes env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -181,23 +159,30 @@ jobs: run: | cp jmx-metrics/build/libs/opentelemetry-jmx-metrics-$VERSION-alpha.jar opentelemetry-jmx-metrics.jar cp jmx-metrics/build/libs/opentelemetry-jmx-metrics-$VERSION-alpha.jar.asc opentelemetry-jmx-metrics.jar.asc + cp jmx-scraper/build/libs/opentelemetry-jmx-scraper-$VERSION-alpha.jar opentelemetry-jmx-scraper.jar + cp jmx-scraper/build/libs/opentelemetry-jmx-scraper-$VERSION-alpha.jar.asc opentelemetry-jmx-scraper.jar.asc + gh release create --target $GITHUB_REF_NAME \ --title "Version $VERSION" \ --notes-file /tmp/release-notes.txt \ v$VERSION \ opentelemetry-jmx-metrics.jar \ - opentelemetry-jmx-metrics.jar.asc + opentelemetry-jmx-metrics.jar.asc \ + opentelemetry-jmx-scraper.jar \ + opentelemetry-jmx-scraper.jar.asc echo "version=$VERSION" >> $GITHUB_OUTPUT - merge-change-log-to-main: + post-release-updates: 
permissions: contents: write # for git push to PR branch runs-on: ubuntu-latest needs: - release steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + # add change log sync (if any) into this PR since the apidiff update + # is required before any other PR can be merged anyway + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Copy change log section from release branch env: @@ -206,7 +191,7 @@ jobs: sed -n "0,/^## Version $VERSION /d;/^## Version /q;p" CHANGELOG.md \ > /tmp/changelog-section.md - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: ref: main @@ -218,10 +203,41 @@ jobs: release_date=$(gh release view v$VERSION --json publishedAt --jq .publishedAt | sed 's/T.*//') RELEASE_DATE=$release_date .github/scripts/merge-change-log-after-release.sh + - name: Wait for release to be available in maven central + env: + VERSION: ${{ needs.release.outputs.version }} + run: | + until curl --silent \ + --show-error \ + --output /dev/null \ + --head \ + --fail \ + https://repo1.maven.org/maven2/io/opentelemetry/contrib/opentelemetry-aws-xray/$VERSION/opentelemetry-aws-xray-$VERSION.jar + do + sleep 60 + done + + - name: Set up JDK for running Gradle + uses: actions/setup-java@dded0888837ed1f317902acf8a20df0ad188d165 # v5.0.0 + with: + distribution: temurin + java-version: 17 + + - name: Set up Gradle + uses: gradle/actions/setup-gradle@4d9f0ba0025fe599b4ebab900eb7f3a1d93ef4c2 # v5.0.0 + + - name: Update apidiff baseline + env: + VERSION: ${{ needs.release.outputs.version }} + PRIOR_VERSION: ${{ needs.release.outputs.prior-version }} + run: | + ./gradlew japicmp -PapiBaseVersion=$PRIOR_VERSION -PapiNewVersion=$VERSION + ./gradlew --refresh-dependencies japicmp + - name: Use CLA approved bot run: .github/scripts/use-cla-approved-bot.sh - - uses: 
actions/create-github-app-token@3ff1caaa28b64c9cc276ce0a02e2ff584f3900c5 # v2.0.2 + - uses: actions/create-github-app-token@67018539274d69449ef7c02e8e71183d1719ab42 # v2.1.4 id: otelbot-token with: app-id: ${{ vars.OTELBOT_APP_ID }} @@ -233,22 +249,14 @@ jobs: # not using secrets.GITHUB_TOKEN since pull requests from that token do not run workflows GH_TOKEN: ${{ steps.otelbot-token.outputs.token }} run: | - if git diff --quiet; then - if [[ $VERSION == *.0 ]]; then - echo there are no updates to merge, not creating pull request - exit 0 # success - else - echo patch release notes did not get applied for some reason - exit 1 # failure - fi - fi - - message="Merge change log updates from $GITHUB_REF_NAME" - body="Merge log updates from \`$GITHUB_REF_NAME\`." - branch="otelbot/merge-change-log-updates-from-${GITHUB_REF_NAME//\//-}" + message="Post-release updates for $VERSION" + body="Post-release updates for \`$VERSION\`." + branch="otelbot/update-apidiff-baseline-to-released-version-${VERSION}" git checkout -b $branch - git commit -a -m "$message" + git add CHANGELOG.md + git add docs/apidiffs + git commit -m "$message" git push --set-upstream origin $branch gh pr create --title "$message" \ --body "$body" \ diff --git a/.github/workflows/reusable-link-check.yml b/.github/workflows/reusable-link-check.yml new file mode 100644 index 000000000..d778dea64 --- /dev/null +++ b/.github/workflows/reusable-link-check.yml @@ -0,0 +1,28 @@ +name: Reusable - Link check + +on: + workflow_call: + +permissions: + contents: read + +jobs: + link-check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + with: + fetch-depth: 0 # needed for merge-base used in lint:links-in-modified-files + + - uses: jdx/mise-action@e3d7b8d67a7958d1207f6ed871e83b1ea780e7b0 # v3.3.1 + + - name: Link check - relative links (all files) + if: github.event_name == 'pull_request' + env: + GITHUB_TOKEN: ${{ github.token }} + run: mise run 
lint:local-links + + - name: Link check (modified files only) + env: + GITHUB_TOKEN: ${{ github.token }} + run: mise run lint:links-in-modified-files --base origin/${{ github.base_ref }} --head ${{ github.event.pull_request.head.sha }} --event ${{ github.event_name }} diff --git a/.github/workflows/reusable-markdown-link-check.yml b/.github/workflows/reusable-markdown-link-check.yml deleted file mode 100644 index e8692d6a3..000000000 --- a/.github/workflows/reusable-markdown-link-check.yml +++ /dev/null @@ -1,22 +0,0 @@ -name: Reusable - Markdown link check - -on: - workflow_call: - -permissions: - contents: read - -jobs: - markdown-link-check: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - uses: lycheeverse/lychee-action@1d97d84f0bc547f7b25f4c2170d87d810dc2fb2c # v2.4.0 - with: - # excluding links to pull requests and issues is done for performance - args: > - --include-fragments - --exclude "^https://github.com/open-telemetry/opentelemetry-java-contrib/(issue|pull)/\\d+$" - --max-retries 6 - . 
diff --git a/.github/workflows/reusable-markdown-lint.yml b/.github/workflows/reusable-markdown-lint.yml index f3688c191..83fc58586 100644 --- a/.github/workflows/reusable-markdown-lint.yml +++ b/.github/workflows/reusable-markdown-lint.yml @@ -10,8 +10,8 @@ jobs: markdown-lint-check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Run markdownlint run: | - npx markdownlint-cli@0.44.0 -c .github/config/markdownlint.yml **/*.md + npx markdownlint-cli@0.45.0 -c .github/config/markdownlint.yml **/*.md diff --git a/.github/workflows/reusable-misspell-check.yml b/.github/workflows/reusable-misspell-check.yml index b1e266cdf..7e8ddaf77 100644 --- a/.github/workflows/reusable-misspell-check.yml +++ b/.github/workflows/reusable-misspell-check.yml @@ -10,7 +10,7 @@ jobs: misspell-check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Install misspell run: | diff --git a/.github/workflows/reusable-shell-script-check.yml b/.github/workflows/reusable-shell-script-check.yml index 4e5f51e0f..b2e403110 100644 --- a/.github/workflows/reusable-shell-script-check.yml +++ b/.github/workflows/reusable-shell-script-check.yml @@ -10,7 +10,7 @@ jobs: shell-script-check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Install shell check run: wget -qO- "https://github.com/koalaman/shellcheck/releases/download/stable/shellcheck-stable.linux.x86_64.tar.xz" | tar -xJv diff --git a/.github/workflows/reusable-workflow-notification.yml b/.github/workflows/reusable-workflow-notification.yml index 701f90f5a..61e8d6267 100644 --- 
a/.github/workflows/reusable-workflow-notification.yml +++ b/.github/workflows/reusable-workflow-notification.yml @@ -19,7 +19,7 @@ jobs: issues: write runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Open issue or add comment if issue already open env: diff --git a/.gitignore b/.gitignore index 641751d61..4743c99e3 100644 --- a/.gitignore +++ b/.gitignore @@ -53,3 +53,6 @@ bin .swp .gitpod.yml + +# Gradle build scans +build-scan.txt diff --git a/.mise/tasks/lint/.shellcheckrc b/.mise/tasks/lint/.shellcheckrc new file mode 100644 index 000000000..c186fb835 --- /dev/null +++ b/.mise/tasks/lint/.shellcheckrc @@ -0,0 +1,3 @@ +# shellcheck configuration for mise tasks +# SC2154: usage_* variables are set by mise framework +disable=SC2154 diff --git a/.mise/tasks/lint/links-in-modified-files.sh b/.mise/tasks/lint/links-in-modified-files.sh new file mode 100755 index 000000000..524cc2d28 --- /dev/null +++ b/.mise/tasks/lint/links-in-modified-files.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +#MISE description="Lint links in modified files" + +set -e + +#USAGE flag "--base " help="base branch to compare against (default: origin/main)" default="origin/main" +#USAGE flag "--head " help="head branch to compare against (empty for local changes) (default: empty)" default="" +#USAGE flag "--event " help="event name (default: pull_request)" default="pull_request" + +if [ "$usage_head" = "''" ]; then + usage_head="" +fi + +# Check if lychee config was modified +# shellcheck disable=SC2086 +# - because usage_head may be empty +config_modified=$(git diff --name-only --merge-base "$usage_base" $usage_head \ + | grep -E '^(\.github/config/lychee\.toml|\.mise/tasks/lint/.*|mise\.toml)$' || true) + +if [ "$usage_event" != "pull_request" ] ; then + echo "Not a PR - checking all files." 
+ mise run lint:links +elif [ -n "$config_modified" ] ; then + echo "config changes, checking all files." + mise run lint:links +else + # Using lychee's default extension filter here to match when it runs against all files + # Note: --diff-filter=d filters out deleted files + # shellcheck disable=SC2086 + # - because usage_head may be empty + modified_files=$(git diff --name-only --diff-filter=d "$usage_base" $usage_head \ + | grep -E '\.(md|mkd|mdx|mdown|mdwn|mkdn|mkdown|markdown|html|htm|txt)$' \ + | tr '\n' ' ' || true) + + if [ -z "$modified_files" ]; then + echo "No modified files, skipping link linting." + exit 0 + fi + + # shellcheck disable=SC2086 + mise run lint:links $modified_files +fi + diff --git a/.mise/tasks/lint/links.sh b/.mise/tasks/lint/links.sh new file mode 100755 index 000000000..f5f708535 --- /dev/null +++ b/.mise/tasks/lint/links.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +#MISE description="Lint links in all files" + +set -e + +#USAGE arg "" var=#true help="files to check" default="." + +for f in $usage_file; do + echo "Checking links in file: $f" +done + +# shellcheck disable=SC2086 +lychee --verbose --config .github/config/lychee.toml $usage_file diff --git a/.mise/tasks/lint/local-links.sh b/.mise/tasks/lint/local-links.sh new file mode 100755 index 000000000..f16cd3aa5 --- /dev/null +++ b/.mise/tasks/lint/local-links.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +#MISE description="Lint links in local files" + +set -e + +#USAGE arg "" var=#true help="files to check" default="." + +for f in $usage_file; do + echo "Checking links in file: $f" +done + +# shellcheck disable=SC2086 +lychee --verbose --scheme file --include-fragments $usage_file diff --git a/CHANGELOG.md b/CHANGELOG.md index ca4f7b3ad..1576667f0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,184 @@ ## Unreleased +## Version 1.50.0 (2025-09-26) + +Note: This release broadly applies some style guidelines across the repository. 
As a result, +some classes that were visible might be package-private. Other non-final classes may now +be final. See +[#2182](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2182) +and +[#2210](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2210) +and +[#2212](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2212) +and +[#2213](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2213) +for examples and details. These changes are not expected to break anyone, so please open +an issue if this causes problems. + +### Baggage processor + +- Move baggage processor to the front of the processor list + ([#2152](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2152)) +- Add declarative configuration support + ([#2031](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2031)) + +### Disk buffering + +- Catching IllegalStateException in case of failed deserialization + ([#2157](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2157)) +- Apply final to public API classes where possible + ([#2216](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2216)) +- Handle empty attribute values + ([#2268](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2268)) + +### Inferred spans + +- Support dynamically changing the inferred span interval + ([#2153](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2153)) + +### JMX scraper + +- Implement stable `service.instance.id` + ([#2270](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2270)) + +### Kafka exporter + +- Add Kafka connectivity error handling + ([#2202](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2202)) + +### OpAMP client + +- Move important user-facing classes 
out of 'internal' package + ([#2249](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2249)) +- Exponential backoff retries on http connection failures + ([#2274](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2274)) + +### Span stack traces + +- Add declarative configuration support + ([#2262](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2262)) + + +## Version 1.49.0 (2025-08-25) + +### Consistent sampling + +- Add updateable threshold sampler for dynamic sampling configuration + ([#2137](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2137)) + +### Disk buffering + +- Introduce API changes for improved disk buffering functionality + ([#2084](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2084)) +- Implement more efficient serializer with direct disk write capabilities + ([#2138](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2138)) + +### IBM MQ metrics - New 🌟 + +IBM MQ metrics collection utility. + +### Inferred spans + +- Update async profiler to version 4.1 for improved performance + ([#2096](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2096)) + +### OpAMP client - New 🌟 + +OpenTelemetry Agent Management Protocol (OpAMP) client implementation. 
+ +## Version 1.48.0 (2025-07-23) + +### AWS resources + +- Support for declarative configuration + ([#2014](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2014)) + +### AWS X-Ray SDK support + +- Update SamplerRulesApplier to recognize new HTTP/URL semconv + ([#1959](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1959)) + +### Azure resources + +- Support for declarative configuration + ([#2014](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2014)) + +### CloudFoundry resources + +- Support for declarative configuration + ([#2014](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2014)) + +### Consistent sampling + +- Refactor ConsistentFixedThresholdSampler to prepare for dynamic threshold support + ([#2018](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2018)) +- ConsistentRateLimitingSampler can fail if used in combination with legacy samplers + ([#2022](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2022)) + +### GCP resources + +- Support for declarative configuration + ([#2014](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2014)) + +### JMX metrics + +- Deprecate JMX Gatherer and provide migration guide to JMX Scraper + ([#2034](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2034)) + +### JMX scraper + +- Update Jetty metrics configuration corresponding to Java Instrumentation 2.18.0 + ([#2033](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2033)) +- Mark as production-ready and remove experimental status + ([#2034](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2034)) + +### Maven extension + +- Support for declarative configuration + ([#2014](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2014)) + +### 
Resource providers + +- Support for declarative configuration + ([#2014](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/2014)) + +## Version 1.47.0 (2025-07-04) + +### Disk buffering + +- Shared storage + ([#1912](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1912)) +- Implementing ExtendedLogRecordData + ([#1918](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1918)) +- Add missing EventName to disk-buffering LogRecordDataMapper + ([#1950](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1950)) + +### GCP authentication extension + +- Update the internal implementation such that the required headers are retrieved + from the Google Auth Library instead of manually constructing and passing them. + ([#1860](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1860)) +- Add metrics support to auth extension + ([#1891](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1891)) +- Update ConfigurableOptions to read from ConfigProperties + ([#1904](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1904)) + +### Inferred spans + +- Upgrade async-profiler to 4.0 + ([#1872](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1872)) + +### Kafka exporter + +- Upgrade kafka-clients to 4.0 (and so now requires Java 11+) + ([#1802](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1802)) + +### Maven extension + +- Add option to record transferred artifacts + ([#1875](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1875)) + ## Version 1.46.0 (2025-04-11) ### Baggage processor @@ -151,7 +329,7 @@ The extension takes care of the necessary configuration required to authenticate The future of the [JMX metrics](./jmx-metrics/README.md) component, built on top of the -[JMX 
metrics](https://github.com/open-telemetry/opentelemetry-java-instrumentation/tree/main/instrumentation/jmx-metrics/javaagent#jmx-metric-insight) +[JMX metrics](https://github.com/open-telemetry/opentelemetry-java-instrumentation/blob/main/instrumentation/jmx-metrics/README.md#jmx-metric-insight) component from the opentelemetry-java-instrumentation repository. ### Maven extension diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f482397c7..04a50c300 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,82 +1,65 @@ # Contributing -Welcome to the OpenTelemetry Java Contrib Repository! +Welcome to the OpenTelemetry Java Contrib repository! ## Introduction -This repository focuses on providing tools and utilities for Java-based observability, such as remote JMX metric gathering and reporting. We’re excited to have you here! Whether you’re fixing a bug, adding a feature, or suggesting an idea, your contributions are invaluable. +This repository provides observability libraries and utilities for Java applications that complement +the [OpenTelemetry Java SDK](https://github.com/open-telemetry/opentelemetry-java) and +[OpenTelemetry Java Instrumentation](https://github.com/open-telemetry/opentelemetry-java-instrumentation) +projects. -Before submitting new features or changes to current functionality, it is recommended to first -[open an issue](https://github.com/open-telemetry/opentelemetry-java-contrib/issues/new) -and discuss your ideas or propose the changes you wish to make. - -Questions? Ask in the OpenTelemetry [java channel](https://cloud-native.slack.com/archives/C014L2KCTE3) +Before submitting new features or changes, please consider +[opening an issue](https://github.com/open-telemetry/opentelemetry-java-contrib/issues/new) first to +discuss your ideas. Pull requests for bug fixes are always welcome! 
-## Pre-requisites - -To work with this repository, ensure you have: - -### Tools: - -Java 17 or higher - -### Platform Notes: - -macOS/Linux: Ensure JAVA_HOME is set correctly. - -## Workflow - -1. Fork the repository -2. Clone locally -3. Create a branch before working on an issue - -## Local Run/Build +## Building and Testing -In order to build and test this whole repository you need JDK 11+. +While most modules target Java 8, building this project requires Java 17 or higher. -#### Snapshot builds - -For developers testing code changes before a release is complete, there are -snapshot builds of the `main` branch. They are available from -the Sonatype OSS snapshots repository at `https://oss.sonatype.org/content/repositories/snapshots/` -([browse](https://oss.sonatype.org/content/repositories/snapshots/io/opentelemetry/contrib/)) - -#### Building from source - -Building using Java 11+: +To build the project: ```bash -$ java -version +./gradlew assemble ``` +To run the tests: + ```bash -$ ./gradlew assemble +./gradlew test ``` -## Testing +Some modules include integration tests that can be run with: ```bash -$ ./gradlew test +./gradlew integrationTest ``` -### Some modules have integration tests +## Snapshot Builds -``` -$ ./gradlew integrationTest -``` +Snapshot builds of the `main` branch are available from the Sonatype snapshot repository at: +`https://central.sonatype.com/repository/maven-snapshots/` +([browse](https://central.sonatype.com/service/rest/repository/browse/maven-snapshots/io/opentelemetry/contrib/)). + +## Style Guide + +See [Style Guide](docs/style-guide.md). -Follow the Java Instrumentation [Style Guide](https://github.com/open-telemetry/opentelemetry-java-instrumentation/blob/main/docs/contributing/style-guideline.md) from the opentelemetry-java-instrumentation repository. +## Pull Request Guidelines -Failure? Check logs for errors or mismatched dependencies. 
+When submitting a pull request, please ensure that you: -## Gradle conventions +- Clearly describe the change and its motivation +- Mention any breaking changes +- Include tests for new functionality +- Follow the [Style Guide](docs/style-guide.md) -- Use kotlin instead of groovy -- Plugin versions should be specified in `settings.gradle.kts`, not in individual modules -- All modules use `plugins { id("otel.java-conventions") }` +## Getting Help -## Further Help +If you need assistance or have questions: -Join [#otel-java](https://cloud-native.slack.com/archives/C014L2KCTE3) on OpenTelemetry Slack +- Post on the [#otel-java](https://cloud-native.slack.com/archives/C014L2KCTE3) Slack channel +- [Open an issue](https://github.com/open-telemetry/opentelemetry-java-contrib/issues/new/choose) in + this repository diff --git a/README.md b/README.md index 33eb10f34..63dd49369 100644 --- a/README.md +++ b/README.md @@ -26,6 +26,7 @@ feature or via instrumentation, this project is hopefully for you. | alpha | [GCP Authentication Extension](./gcp-auth-extension/README.md) | | beta | [GCP Resources](./gcp-resources/README.md) | | beta | [Inferred Spans](./inferred-spans/README.md) | +| alpha | [IBM MQ Metrics](./ibm-mq-metrics/README.md) | | alpha | [JFR Connection](./jfr-connection/README.md) | | alpha | [JFR Events](./jfr-events/README.md) | | alpha | [JMX Metric Gatherer](./jmx-metrics/README.md) | @@ -47,56 +48,36 @@ On reaching stable status, the `otel.stable` value in `gradle.properties` should Note that currently all the libraries are released together with the version of this repo, so breaking changes (after stable status is reached) would bump the major version of all libraries together. This could get complicated so `stable` has a high bar. 
-## Getting Started - -```bash -# Apply formatting -$ ./gradlew spotlessApply - -# Build the complete project -$ ./gradlew build - -# Run integration tests -$ ./gradlew integrationTest - -# Clean artifacts -$ ./gradlew clean -``` - ## Contributing -The Java Contrib project was initially formed to provide methods of easy remote JMX metric gathering and reporting, -which is actively in development. If you have an idea for a similar use case in the metrics, traces, or logging -domain we would be very interested in supporting it. Please -[open an issue](https://github.com/open-telemetry/opentelemetry-java-contrib/issues/new/choose) to share your idea or -suggestion. PRs are always welcome and greatly appreciated, but for larger functional changes a pre-coding introduction -can be helpful to ensure this is the correct place and that active or conflicting efforts don't exist. +See [CONTRIBUTING.md](CONTRIBUTING.md). -Triagers ([@open-telemetry/java-contrib-triagers](https://github.com/orgs/open-telemetry/teams/java-contrib-triagers)): +### Maintainers -- All [component owners](https://github.com/open-telemetry/opentelemetry-java-contrib/blob/main/.github.amrom.workers.devponent_owners.yml) are given Triager permissions to this repository. +- [Jack Berg](https://github.com/jack-berg), Grafana Labs +- [Jason Plumb](https://github.com/breedx-splk), Splunk +- [Lauri Tulmin](https://github.com/laurit), Splunk +- [Trask Stalnaker](https://github.com/trask), Microsoft -Approvers ([@open-telemetry/java-contrib-approvers](https://github.com/orgs/open-telemetry/teams/java-contrib-approvers)): +For more information about the maintainer role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#maintainer). 
-- [John Watson](https://github.com/jkwatson), Verta.ai +### Approvers -Maintainers ([@open-telemetry/java-contrib-maintainers](https://github.com/orgs/open-telemetry/teams/java-contrib-maintainers)): +- [Jay DeLuca](https://github.com/jaydeluca), Grafana Labs +- [John Watson](https://github.com/jkwatson), Cloudera -- [Jack Berg](https://github.com/jack-berg), New Relic -- [Jason Plumb](https://github.com/breedx-splk), Splunk -- [Lauri Tulmin](https://github.com/laurit), Splunk -- [Trask Stalnaker](https://github.com/trask), Microsoft +For more information about the approver role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#approver). -Emeritus maintainers: +### Emeritus maintainers - [Mateusz Rzeszutek](https://github.com/mateuszrzeszutek) - [Nikita Salnikov-Tarnovski](https://github.com/iNikem) - [Ryan Fitzpatrick](https://github.com/rmfitzpatrick) -Learn more about roles in the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md). +For more information about the emeritus role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#emeritus-maintainerapprovertriager). -Thanks to all the people who already contributed! +### Thanks to all of our contributors! - + Repo contributors diff --git a/RELEASING.md b/RELEASING.md index afb1b8162..70d044163 100644 --- a/RELEASING.md +++ b/RELEASING.md @@ -7,8 +7,8 @@ The version is specified in [version.gradle.kts](version.gradle.kts). 
## Snapshot builds Every successful CI build of the main branch automatically executes `./gradlew publishToSonatype` -as the last step, which publishes a snapshot build to -[Sonatype OSS snapshots repository](https://oss.sonatype.org/content/repositories/snapshots/io/opentelemetry/contrib/). +as the last step, which publishes a snapshot build to the +[Sonatype snapshot repository](https://central.sonatype.com/service/rest/repository/browse/maven-snapshots/io/opentelemetry/contrib/). ## Release cadence diff --git a/all/README.md b/all/README.md deleted file mode 100644 index dca83da7b..000000000 --- a/all/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# opentelemetry-contrib-all (utility project) - -This is a utility project which depends on all other projects in this repository. -We use it for collecting all coverage reports from all modules for uploading to codecov. diff --git a/all/build.gradle.kts b/all/build.gradle.kts deleted file mode 100644 index 58f0f6d72..000000000 --- a/all/build.gradle.kts +++ /dev/null @@ -1,64 +0,0 @@ -plugins { - `jacoco-report-aggregation` - - id("otel.java-conventions") -} - -description = "OpenTelemetry Contrib All" - -dependencies { - rootProject.subprojects.forEach { subproject -> - // Generate aggregate coverage report for published modules that enable jacoco. - subproject.plugins.withId("jacoco") { - subproject.plugins.withId("maven-publish") { - // TODO(anuraaga): Figure out how to avoid transitive dependencies being pulled into jacoco due to the use - // of shadow plugin. - if (subproject.name != "jmx-metrics") { - implementation(project(subproject.path)) { - isTransitive = false - } - } - } - } - } -} - -tasks { - // We don't compile anything here. This project is mostly for - // aggregating jacoco reports and it doesn't work if this isn't at least as high as the - // highest supported Java version in any of our projects. Most of our projects target - // Java 8, but some target Java 11 or 17. 
- withType(JavaCompile::class) { - options.release.set(17) - } -} - -afterEvaluate { - tasks { - testCodeCoverageReport { - classDirectories.setFrom( - classDirectories.files.map { - zipTree(it).filter { - // Exclude mrjar (jacoco complains), shaded, and generated code - !it.absolutePath.contains("META-INF/versions/") && - !it.absolutePath.contains("AutoValue_") - } - }, - ) - - reports { - // xml is usually used to integrate code coverage with - // other tools like SonarQube, Coveralls or Codecov - xml.required.set(true) - - // HTML reports can be used to see code coverage - // without any external tools - html.required.set(true) - } - } - } -} - -dependencyCheck { - skip = true -} diff --git a/aws-resources/build.gradle.kts b/aws-resources/build.gradle.kts index ede0dad59..8c56b17df 100644 --- a/aws-resources/build.gradle.kts +++ b/aws-resources/build.gradle.kts @@ -9,6 +9,7 @@ otelJava.moduleName.set("io.opentelemetry.contrib.aws.resource") dependencies { api("io.opentelemetry:opentelemetry-api") + compileOnly("io.opentelemetry:opentelemetry-api-incubator") api("io.opentelemetry:opentelemetry-sdk") implementation("io.opentelemetry.semconv:opentelemetry-semconv") @@ -20,10 +21,24 @@ dependencies { implementation("com.squareup.okhttp3:okhttp") testImplementation("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure") + testImplementation("io.opentelemetry:opentelemetry-sdk-extension-incubator") + testImplementation("io.opentelemetry:opentelemetry-api-incubator") testImplementation("io.opentelemetry:opentelemetry-sdk-testing") + testImplementation("io.opentelemetry:opentelemetry-exporter-logging") testImplementation("com.linecorp.armeria:armeria-junit5") testRuntimeOnly("org.bouncycastle:bcpkix-jdk15on") testImplementation("com.google.guava:guava") testImplementation("org.skyscreamer:jsonassert") } + +tasks { + withType().configureEach { + environment( + "AWS_REGION" to "us-east-1", + "AWS_LAMBDA_FUNCTION_NAME" to "my-function", + 
"AWS_LAMBDA_FUNCTION_VERSION" to "1.2.3" + ) + jvmArgs("-Dotel.experimental.config.file=${project.projectDir.resolve("src/test/resources/declarative-config.yaml")}") + } +} diff --git a/aws-resources/src/main/java/io/opentelemetry/contrib/aws/resource/BeanstalkResource.java b/aws-resources/src/main/java/io/opentelemetry/contrib/aws/resource/BeanstalkResource.java index 9f294d4fb..7441b7c81 100644 --- a/aws-resources/src/main/java/io/opentelemetry/contrib/aws/resource/BeanstalkResource.java +++ b/aws-resources/src/main/java/io/opentelemetry/contrib/aws/resource/BeanstalkResource.java @@ -12,6 +12,7 @@ import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.SERVICE_INSTANCE_ID; import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.SERVICE_NAMESPACE; import static io.opentelemetry.semconv.ServiceAttributes.SERVICE_VERSION; +import static java.util.logging.Level.WARNING; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonParser; @@ -22,7 +23,6 @@ import io.opentelemetry.semconv.SchemaUrls; import java.io.File; import java.io.IOException; -import java.util.logging.Level; import java.util.logging.Logger; /** @@ -65,7 +65,7 @@ static Resource buildResource(String configPath) { parser.nextToken(); if (!parser.isExpectedStartObjectToken()) { - logger.log(Level.WARNING, "Invalid Beanstalk config: ", configPath); + logger.log(WARNING, "Invalid Beanstalk config: ", configPath); return Resource.create(attrBuilders.build(), SchemaUrls.V1_25_0); } @@ -87,7 +87,7 @@ static Resource buildResource(String configPath) { } } } catch (IOException e) { - logger.log(Level.WARNING, "Could not parse Beanstalk config.", e); + logger.log(WARNING, "Could not parse Beanstalk config.", e); return Resource.empty(); } diff --git a/aws-resources/src/main/java/io/opentelemetry/contrib/aws/resource/DockerHelper.java b/aws-resources/src/main/java/io/opentelemetry/contrib/aws/resource/DockerHelper.java index 5b8aefeac..a52cf1c18 
100644 --- a/aws-resources/src/main/java/io/opentelemetry/contrib/aws/resource/DockerHelper.java +++ b/aws-resources/src/main/java/io/opentelemetry/contrib/aws/resource/DockerHelper.java @@ -5,11 +5,12 @@ package io.opentelemetry.contrib.aws.resource; +import static java.util.logging.Level.WARNING; + import java.io.BufferedReader; import java.io.FileNotFoundException; import java.io.FileReader; import java.io.IOException; -import java.util.logging.Level; import java.util.logging.Logger; class DockerHelper { @@ -44,9 +45,9 @@ public String getContainerId() { } } } catch (FileNotFoundException e) { - logger.log(Level.WARNING, "Failed to read container id, cgroup file does not exist."); + logger.log(WARNING, "Failed to read container id, cgroup file does not exist."); } catch (IOException e) { - logger.log(Level.WARNING, "Unable to read container id: " + e.getMessage()); + logger.log(WARNING, "Unable to read container id: " + e.getMessage()); } return ""; diff --git a/aws-resources/src/main/java/io/opentelemetry/contrib/aws/resource/Ec2Resource.java b/aws-resources/src/main/java/io/opentelemetry/contrib/aws/resource/Ec2Resource.java index d4bdb4228..5aa930b1c 100644 --- a/aws-resources/src/main/java/io/opentelemetry/contrib/aws/resource/Ec2Resource.java +++ b/aws-resources/src/main/java/io/opentelemetry/contrib/aws/resource/Ec2Resource.java @@ -15,6 +15,7 @@ import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.HOST_IMAGE_ID; import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.HOST_NAME; import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.HOST_TYPE; +import static java.util.logging.Level.WARNING; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonParser; @@ -28,7 +29,6 @@ import java.net.URL; import java.util.HashMap; import java.util.Map; -import java.util.logging.Level; import java.util.logging.Logger; /** @@ -125,7 +125,7 @@ static Resource buildResource(String endpoint) 
{ } } } catch (IOException e) { - logger.log(Level.WARNING, "Could not parse identity document, resource not filled.", e); + logger.log(WARNING, "Could not parse identity document, resource not filled.", e); return Resource.empty(); } diff --git a/aws-resources/src/main/java/io/opentelemetry/contrib/aws/resource/EcsResource.java b/aws-resources/src/main/java/io/opentelemetry/contrib/aws/resource/EcsResource.java index de6d50afe..83440819b 100644 --- a/aws-resources/src/main/java/io/opentelemetry/contrib/aws/resource/EcsResource.java +++ b/aws-resources/src/main/java/io/opentelemetry/contrib/aws/resource/EcsResource.java @@ -26,6 +26,9 @@ import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CONTAINER_NAME; import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CloudPlatformIncubatingValues.AWS_ECS; import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CloudProviderIncubatingValues.AWS; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; +import static java.util.logging.Level.WARNING; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonParser; @@ -35,11 +38,9 @@ import io.opentelemetry.sdk.resources.Resource; import io.opentelemetry.semconv.SchemaUrls; import java.io.IOException; -import java.util.Collections; import java.util.Locale; import java.util.Map; import java.util.Optional; -import java.util.logging.Level; import java.util.logging.Logger; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -88,7 +89,7 @@ static Resource buildResource(Map sysEnv, SimpleHttpClient httpC static void fetchMetadata( SimpleHttpClient httpClient, String url, AttributesBuilder attrBuilders) { - String json = httpClient.fetchString("GET", url, Collections.emptyMap(), null); + String json = httpClient.fetchString("GET", url, emptyMap(), null); if (json.isEmpty()) { return; } @@ -103,17 +104,17 @@ static void fetchMetadata( 
.getLogGroupArn() .ifPresent( logGroupArn -> { - attrBuilders.put(AWS_LOG_GROUP_ARNS, Collections.singletonList(logGroupArn)); + attrBuilders.put(AWS_LOG_GROUP_ARNS, singletonList(logGroupArn)); }); logArnBuilder .getLogStreamArn() .ifPresent( logStreamArn -> { - attrBuilders.put(AWS_LOG_STREAM_ARNS, Collections.singletonList(logStreamArn)); + attrBuilders.put(AWS_LOG_STREAM_ARNS, singletonList(logStreamArn)); }); } catch (IOException e) { - logger.log(Level.WARNING, "Can't get ECS metadata", e); + logger.log(WARNING, "Can't get ECS metadata", e); } } @@ -156,7 +157,7 @@ static void parseResponse( JsonParser parser, AttributesBuilder attrBuilders, LogArnBuilder logArnBuilder) throws IOException { if (!parser.isExpectedStartObjectToken()) { - logger.log(Level.WARNING, "Couldn't parse ECS metadata, invalid JSON"); + logger.log(WARNING, "Couldn't parse ECS metadata, invalid JSON"); return; } @@ -314,7 +315,7 @@ private static class DockerImage { private static final Pattern imagePattern = Pattern.compile( - "^(?([^/\\s]+/)?([^:\\s]+))(:(?[^@\\s]+))?(@sha256:(?\\d+))?$"); + "^(?([^/\\s]+/)?([^:\\s]+))(:(?[^@\\s]+))?(@sha256:(?[\\da-fA-F]+))?$"); final String repository; final String tag; @@ -339,7 +340,7 @@ static DockerImage parse(@Nullable String image) { } Matcher matcher = imagePattern.matcher(image); if (!matcher.matches()) { - logger.log(Level.WARNING, "Couldn't parse image '" + image + "'"); + logger.log(WARNING, "Couldn't parse image '" + image + "'"); return null; } String repository = matcher.group("repository"); diff --git a/aws-resources/src/main/java/io/opentelemetry/contrib/aws/resource/EksResource.java b/aws-resources/src/main/java/io/opentelemetry/contrib/aws/resource/EksResource.java index 8ed3fb512..156755446 100644 --- a/aws-resources/src/main/java/io/opentelemetry/contrib/aws/resource/EksResource.java +++ b/aws-resources/src/main/java/io/opentelemetry/contrib/aws/resource/EksResource.java @@ -11,6 +11,8 @@ import static 
io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CloudPlatformIncubatingValues.AWS_EKS; import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.CloudProviderIncubatingValues.AWS; import static io.opentelemetry.contrib.aws.resource.IncubatingAttributes.K8S_CLUSTER_NAME; +import static java.util.logging.Level.FINE; +import static java.util.logging.Level.WARNING; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonParser; @@ -26,7 +28,6 @@ import java.nio.file.Paths; import java.util.HashMap; import java.util.Map; -import java.util.logging.Level; import java.util.logging.Logger; /** @@ -91,7 +92,7 @@ static Resource buildResource( private static boolean isEks( String k8sTokenPath, String k8sKeystorePath, SimpleHttpClient httpClient) { if (!isK8s(k8sTokenPath, k8sKeystorePath)) { - logger.log(Level.FINE, "Not running on k8s."); + logger.log(FINE, "Not running on k8s."); return false; } @@ -145,7 +146,7 @@ private static String getClusterName(SimpleHttpClient httpClient) { } } } catch (IOException e) { - logger.log(Level.WARNING, "Can't get cluster name on EKS.", e); + logger.log(WARNING, "Can't get cluster name on EKS.", e); } return ""; } @@ -156,7 +157,7 @@ private static String getK8sCredHeader() { new String(Files.readAllBytes(Paths.get(K8S_TOKEN_PATH)), StandardCharsets.UTF_8); return "Bearer " + content; } catch (IOException e) { - logger.log(Level.WARNING, "Unable to load K8s client token.", e); + logger.log(WARNING, "Unable to load K8s client token.", e); } return ""; } diff --git a/aws-resources/src/main/java/io/opentelemetry/contrib/aws/resource/SimpleHttpClient.java b/aws-resources/src/main/java/io/opentelemetry/contrib/aws/resource/SimpleHttpClient.java index 12bc6e34e..f78719d99 100644 --- a/aws-resources/src/main/java/io/opentelemetry/contrib/aws/resource/SimpleHttpClient.java +++ b/aws-resources/src/main/java/io/opentelemetry/contrib/aws/resource/SimpleHttpClient.java @@ -5,6 +5,9 @@ package 
io.opentelemetry.contrib.aws.resource; +import static java.util.logging.Level.FINE; +import static java.util.logging.Level.WARNING; + import java.io.FileInputStream; import java.io.IOException; import java.security.KeyStore; @@ -13,7 +16,6 @@ import java.time.Duration; import java.util.Collection; import java.util.Map; -import java.util.logging.Level; import java.util.logging.Logger; import javax.annotation.Nullable; import javax.net.ssl.SSLContext; @@ -72,7 +74,7 @@ public String fetchString( int responseCode = response.code(); if (responseCode != 200) { logger.log( - Level.FINE, + FINE, "Error response from " + urlStr + " code (" @@ -84,7 +86,7 @@ public String fetchString( ResponseBody body = response.body(); return body != null ? body.string() : ""; } catch (IOException e) { - logger.log(Level.FINE, "SimpleHttpClient fetch string failed.", e); + logger.log(FINE, "SimpleHttpClient fetch string failed.", e); } return ""; @@ -101,7 +103,7 @@ private static X509TrustManager buildTrustManager(@Nullable KeyStore keyStore) { tmf.init(keyStore); return (X509TrustManager) tmf.getTrustManagers()[0]; } catch (Exception e) { - logger.log(Level.WARNING, "Build SslSocketFactory for K8s restful client exception.", e); + logger.log(WARNING, "Build SslSocketFactory for K8s restful client exception.", e); return null; } } @@ -117,7 +119,7 @@ private static SSLSocketFactory buildSslSocketFactory(@Nullable TrustManager tru return context.getSocketFactory(); } catch (Exception e) { - logger.log(Level.WARNING, "Build SslSocketFactory for K8s restful client exception.", e); + logger.log(WARNING, "Build SslSocketFactory for K8s restful client exception.", e); } return null; } @@ -138,7 +140,7 @@ private static KeyStore getKeystoreForTrustedCert(String certPath) { } return trustStore; } catch (Exception e) { - logger.log(Level.WARNING, "Cannot load KeyStore from " + certPath); + logger.log(WARNING, "Cannot load KeyStore from " + certPath); return null; } } diff --git 
a/aws-resources/src/main/java/io/opentelemetry/contrib/aws/resource/internal/AwsResourceDetector.java b/aws-resources/src/main/java/io/opentelemetry/contrib/aws/resource/internal/AwsResourceDetector.java new file mode 100644 index 000000000..ae4255570 --- /dev/null +++ b/aws-resources/src/main/java/io/opentelemetry/contrib/aws/resource/internal/AwsResourceDetector.java @@ -0,0 +1,40 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.aws.resource.internal; + +import io.opentelemetry.api.incubator.config.DeclarativeConfigProperties; +import io.opentelemetry.contrib.aws.resource.BeanstalkResource; +import io.opentelemetry.contrib.aws.resource.Ec2Resource; +import io.opentelemetry.contrib.aws.resource.EcsResource; +import io.opentelemetry.contrib.aws.resource.EksResource; +import io.opentelemetry.contrib.aws.resource.LambdaResource; +import io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider; +import io.opentelemetry.sdk.resources.Resource; +import io.opentelemetry.sdk.resources.ResourceBuilder; + +public class AwsResourceDetector implements ComponentProvider { + + @Override + public Class getType() { + return Resource.class; + } + + @Override + public String getName() { + return "aws"; + } + + @Override + public Resource create(DeclarativeConfigProperties config) { + ResourceBuilder builder = Resource.builder(); + builder.putAll(BeanstalkResource.get()); + builder.putAll(Ec2Resource.get()); + builder.putAll(EcsResource.get()); + builder.putAll(EksResource.get()); + builder.putAll(LambdaResource.get()); + return builder.build(); + } +} diff --git a/aws-resources/src/main/resources/META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider b/aws-resources/src/main/resources/META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider new file mode 100644 index 000000000..ea6d743f4 --- /dev/null +++ 
b/aws-resources/src/main/resources/META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider @@ -0,0 +1 @@ +io.opentelemetry.contrib.aws.resource.internal.AwsResourceDetector diff --git a/aws-resources/src/test/java/io/opentelemetry/contrib/aws/resource/EksResourceTest.java b/aws-resources/src/test/java/io/opentelemetry/contrib/aws/resource/EksResourceTest.java index 7eaec5e55..31b05be57 100644 --- a/aws-resources/src/test/java/io/opentelemetry/contrib/aws/resource/EksResourceTest.java +++ b/aws-resources/src/test/java/io/opentelemetry/contrib/aws/resource/EksResourceTest.java @@ -34,7 +34,7 @@ import org.mockito.junit.jupiter.MockitoExtension; @ExtendWith(MockitoExtension.class) -public class EksResourceTest { +class EksResourceTest { @Mock private DockerHelper mockDockerHelper; diff --git a/aws-resources/src/test/java/io/opentelemetry/contrib/aws/resource/ResourceComponentProviderTest.java b/aws-resources/src/test/java/io/opentelemetry/contrib/aws/resource/ResourceComponentProviderTest.java new file mode 100644 index 000000000..51e21854b --- /dev/null +++ b/aws-resources/src/test/java/io/opentelemetry/contrib/aws/resource/ResourceComponentProviderTest.java @@ -0,0 +1,34 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.aws.resource; + +import static org.assertj.core.api.Assertions.assertThat; + +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.sdk.autoconfigure.AutoConfiguredOpenTelemetrySdk; +import io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions; +import io.opentelemetry.semconv.incubating.CloudIncubatingAttributes; +import org.assertj.core.api.InstanceOfAssertFactory; +import org.junit.jupiter.api.Test; + +class ResourceComponentProviderTest { + @Test + void endToEnd() { + assertThat( + AutoConfiguredOpenTelemetrySdk.builder() + .build() + .getOpenTelemetrySdk() + .getSdkTracerProvider()) + .extracting("sharedState") + 
.extracting("resource") + .extracting( + "attributes", + new InstanceOfAssertFactory<>(Attributes.class, OpenTelemetryAssertions::assertThat)) + .containsEntry( + CloudIncubatingAttributes.CLOUD_PROVIDER, + CloudIncubatingAttributes.CloudProviderIncubatingValues.AWS); + } +} diff --git a/aws-resources/src/test/resources/declarative-config.yaml b/aws-resources/src/test/resources/declarative-config.yaml new file mode 100644 index 000000000..da52af7d7 --- /dev/null +++ b/aws-resources/src/test/resources/declarative-config.yaml @@ -0,0 +1,10 @@ +file_format: "1.0-rc.1" +resource: + detection/development: + detectors: + - aws: +tracer_provider: + processors: + - simple: + exporter: + console: diff --git a/aws-resources/src/test/resources/ecs-container-metadata-v3.json b/aws-resources/src/test/resources/ecs-container-metadata-v3.json index 2e89ffe59..f7cc0f53f 100644 --- a/aws-resources/src/test/resources/ecs-container-metadata-v3.json +++ b/aws-resources/src/test/resources/ecs-container-metadata-v3.json @@ -2,7 +2,7 @@ "DockerId": "43481a6ce4842eec8fe72fc28500c6b52edcc0917f105b83379f88cac1ff3946", "Name": "nginx-curl", "DockerName": "ecs-nginx-5-nginx-curl-ccccb9f49db0dfe0d901", - "Image": "nrdlngr/nginx-curl", + "Image": "nrdlngr/nginx-curl:latest@sha256:8dc35e9386b5d280d285ae7a78d271a5d4a82106cb254fbed5fde4923faa8deb", "ImageID": "sha256:2e00ae64383cfc865ba0a2ba37f61b50a120d2d9378559dcd458dc0de47bc165", "Labels": { "com.amazonaws.ecs.cluster": "default", @@ -28,4 +28,4 @@ ] } ] -} \ No newline at end of file +} diff --git a/aws-xray-propagator/src/main/java/io/opentelemetry/contrib/awsxray/propagator/AwsXrayLambdaPropagator.java b/aws-xray-propagator/src/main/java/io/opentelemetry/contrib/awsxray/propagator/AwsXrayLambdaPropagator.java index a6b6a2ab4..b34bc961c 100644 --- a/aws-xray-propagator/src/main/java/io/opentelemetry/contrib/awsxray/propagator/AwsXrayLambdaPropagator.java +++ 
b/aws-xray-propagator/src/main/java/io/opentelemetry/contrib/awsxray/propagator/AwsXrayLambdaPropagator.java @@ -5,12 +5,13 @@ package io.opentelemetry.contrib.awsxray.propagator; +import static java.util.Collections.singletonMap; + import io.opentelemetry.api.trace.Span; import io.opentelemetry.context.Context; import io.opentelemetry.context.propagation.TextMapGetter; import io.opentelemetry.context.propagation.TextMapPropagator; import io.opentelemetry.context.propagation.TextMapSetter; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; @@ -75,7 +76,7 @@ public Context extract(Context context, @Nullable C carrier, TextMapGetter FIELDS = Collections.singletonList(TRACE_HEADER_KEY); + private static final List FIELDS = singletonList(TRACE_HEADER_KEY); private static final AwsXrayPropagator INSTANCE = new AwsXrayPropagator(); diff --git a/aws-xray-propagator/src/test/java/io/opentelemetry/contrib/awsxray/propagator/AwsXrayCompositePropagatorTest.java b/aws-xray-propagator/src/test/java/io/opentelemetry/contrib/awsxray/propagator/AwsXrayCompositePropagatorTest.java index 53e806fdc..99998d974 100644 --- a/aws-xray-propagator/src/test/java/io/opentelemetry/contrib/awsxray/propagator/AwsXrayCompositePropagatorTest.java +++ b/aws-xray-propagator/src/test/java/io/opentelemetry/contrib/awsxray/propagator/AwsXrayCompositePropagatorTest.java @@ -15,7 +15,7 @@ import java.util.LinkedHashMap; import org.junit.jupiter.api.Test; -public class AwsXrayCompositePropagatorTest extends AwsXrayPropagatorTest { +class AwsXrayCompositePropagatorTest extends AwsXrayPropagatorTest { @Override TextMapPropagator propagator() { diff --git a/aws-xray-propagator/src/test/java/io/opentelemetry/contrib/awsxray/propagator/internal/AwsComponentProviderTest.java b/aws-xray-propagator/src/test/java/io/opentelemetry/contrib/awsxray/propagator/internal/AwsComponentProviderTest.java index 1d1590d3a..5bcd62137 100644 --- 
a/aws-xray-propagator/src/test/java/io/opentelemetry/contrib/awsxray/propagator/internal/AwsComponentProviderTest.java +++ b/aws-xray-propagator/src/test/java/io/opentelemetry/contrib/awsxray/propagator/internal/AwsComponentProviderTest.java @@ -20,7 +20,12 @@ class AwsComponentProviderTest { @Test void endToEnd() { - String yaml = "file_format: 0.3\n" + "propagator:\n" + " composite: [xray, xray-lambda]\n"; + String yaml = + "file_format: 1.0-rc.1\n" + + "propagator:\n" + + " composite:\n" + + " - xray:\n" + + " - xray-lambda:\n"; OpenTelemetrySdk openTelemetrySdk = DeclarativeConfiguration.parseAndCreate( diff --git a/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/AwsAttributeKeys.java b/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/AwsAttributeKeys.java index 101641f08..c9e762d63 100644 --- a/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/AwsAttributeKeys.java +++ b/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/AwsAttributeKeys.java @@ -5,6 +5,8 @@ package io.opentelemetry.contrib.awsxray; +import static io.opentelemetry.api.common.AttributeKey.stringKey; + import io.opentelemetry.api.common.AttributeKey; /** Utility class holding attribute keys with special meaning to AWS components */ @@ -12,28 +14,25 @@ final class AwsAttributeKeys { private AwsAttributeKeys() {} - static final AttributeKey AWS_SPAN_KIND = AttributeKey.stringKey("aws.span.kind"); + static final AttributeKey AWS_SPAN_KIND = stringKey("aws.span.kind"); - static final AttributeKey AWS_LOCAL_SERVICE = AttributeKey.stringKey("aws.local.service"); + static final AttributeKey AWS_LOCAL_SERVICE = stringKey("aws.local.service"); - static final AttributeKey AWS_LOCAL_OPERATION = - AttributeKey.stringKey("aws.local.operation"); + static final AttributeKey AWS_LOCAL_OPERATION = stringKey("aws.local.operation"); - static final AttributeKey AWS_REMOTE_SERVICE = - AttributeKey.stringKey("aws.remote.service"); + static final AttributeKey AWS_REMOTE_SERVICE = 
stringKey("aws.remote.service"); - static final AttributeKey AWS_REMOTE_OPERATION = - AttributeKey.stringKey("aws.remote.operation"); + static final AttributeKey AWS_REMOTE_OPERATION = stringKey("aws.remote.operation"); - static final AttributeKey AWS_REMOTE_TARGET = AttributeKey.stringKey("aws.remote.target"); + static final AttributeKey AWS_REMOTE_TARGET = stringKey("aws.remote.target"); // use the same AWS Resource attribute name defined by OTel java auto-instr for aws_sdk_v_1_1 // TODO: all AWS specific attributes should be defined in semconv package and reused cross all // otel packages. Related sim - // https://github.com/open-telemetry/opentelemetry-java-instrumentation/issues/8710 - static final AttributeKey AWS_BUCKET_NAME = AttributeKey.stringKey("aws.bucket.name"); - static final AttributeKey AWS_QUEUE_NAME = AttributeKey.stringKey("aws.queue.name"); - static final AttributeKey AWS_STREAM_NAME = AttributeKey.stringKey("aws.stream.name"); - static final AttributeKey AWS_TABLE_NAME = AttributeKey.stringKey("aws.table.name"); + static final AttributeKey AWS_BUCKET_NAME = stringKey("aws.bucket.name"); + static final AttributeKey AWS_QUEUE_NAME = stringKey("aws.queue.name"); + static final AttributeKey AWS_STREAM_NAME = stringKey("aws.stream.name"); + static final AttributeKey AWS_TABLE_NAME = stringKey("aws.table.name"); } diff --git a/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/AwsMetricAttributeGenerator.java b/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/AwsMetricAttributeGenerator.java index 42275b4b1..3fed3b3f3 100644 --- a/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/AwsMetricAttributeGenerator.java +++ b/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/AwsMetricAttributeGenerator.java @@ -5,6 +5,8 @@ package io.opentelemetry.contrib.awsxray; +import static io.opentelemetry.api.common.AttributeKey.longKey; +import static io.opentelemetry.api.common.AttributeKey.stringKey; import static 
io.opentelemetry.contrib.awsxray.AwsAttributeKeys.AWS_BUCKET_NAME; import static io.opentelemetry.contrib.awsxray.AwsAttributeKeys.AWS_LOCAL_OPERATION; import static io.opentelemetry.contrib.awsxray.AwsAttributeKeys.AWS_LOCAL_SERVICE; @@ -16,6 +18,7 @@ import static io.opentelemetry.contrib.awsxray.AwsAttributeKeys.AWS_STREAM_NAME; import static io.opentelemetry.contrib.awsxray.AwsAttributeKeys.AWS_TABLE_NAME; import static io.opentelemetry.semconv.ServiceAttributes.SERVICE_NAME; +import static java.util.logging.Level.FINEST; import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.common.Attributes; @@ -27,7 +30,6 @@ import java.net.MalformedURLException; import java.net.URL; import java.util.Optional; -import java.util.logging.Level; import java.util.logging.Logger; /** @@ -54,36 +56,31 @@ final class AwsMetricAttributeGenerator implements MetricAttributeGenerator { private static final String UNKNOWN_REMOTE_OPERATION = "UnknownRemoteOperation"; // copied from DbIncubatingAttributes - private static final AttributeKey DB_OPERATION = AttributeKey.stringKey("db.operation"); - private static final AttributeKey DB_SYSTEM = AttributeKey.stringKey("db.system"); + private static final AttributeKey DB_OPERATION = stringKey("db.operation"); + private static final AttributeKey DB_SYSTEM = stringKey("db.system"); // copied from FaasIncubatingAttributes - private static final AttributeKey FAAS_INVOKED_NAME = - AttributeKey.stringKey("faas.invoked_name"); - private static final AttributeKey FAAS_TRIGGER = AttributeKey.stringKey("faas.trigger"); + private static final AttributeKey FAAS_INVOKED_NAME = stringKey("faas.invoked_name"); + private static final AttributeKey FAAS_TRIGGER = stringKey("faas.trigger"); // copied from GraphqlIncubatingAttributes private static final AttributeKey GRAPHQL_OPERATION_TYPE = - AttributeKey.stringKey("graphql.operation.type"); + stringKey("graphql.operation.type"); // copied from HttpIncubatingAttributes - private static 
final AttributeKey HTTP_METHOD = AttributeKey.stringKey("http.method"); - private static final AttributeKey HTTP_TARGET = AttributeKey.stringKey("http.target"); - private static final AttributeKey HTTP_URL = AttributeKey.stringKey("http.url"); + private static final AttributeKey HTTP_METHOD = stringKey("http.method"); + private static final AttributeKey HTTP_TARGET = stringKey("http.target"); + private static final AttributeKey HTTP_URL = stringKey("http.url"); // copied from MessagingIncubatingAttributes - private static final AttributeKey MESSAGING_OPERATION = - AttributeKey.stringKey("messaging.operation"); - private static final AttributeKey MESSAGING_SYSTEM = - AttributeKey.stringKey("messaging.system"); + private static final AttributeKey MESSAGING_OPERATION = stringKey("messaging.operation"); + private static final AttributeKey MESSAGING_SYSTEM = stringKey("messaging.system"); // copied from NetIncubatingAttributes - private static final AttributeKey NET_PEER_NAME = AttributeKey.stringKey("net.peer.name"); - private static final AttributeKey NET_PEER_PORT = AttributeKey.longKey("net.peer.port"); - private static final AttributeKey NET_SOCK_PEER_ADDR = - AttributeKey.stringKey("net.sock.peer.addr"); - private static final AttributeKey NET_SOCK_PEER_PORT = - AttributeKey.longKey("net.sock.peer.port"); + private static final AttributeKey NET_PEER_NAME = stringKey("net.peer.name"); + private static final AttributeKey NET_PEER_PORT = longKey("net.peer.port"); + private static final AttributeKey NET_SOCK_PEER_ADDR = stringKey("net.sock.peer.addr"); + private static final AttributeKey NET_SOCK_PEER_PORT = longKey("net.sock.peer.port"); // copied from PeerIncubatingAttributes - private static final AttributeKey PEER_SERVICE = AttributeKey.stringKey("peer.service"); + private static final AttributeKey PEER_SERVICE = stringKey("peer.service"); // copied from RpcIncubatingAttributes - private static final AttributeKey RPC_METHOD = AttributeKey.stringKey("rpc.method"); 
- private static final AttributeKey RPC_SERVICE = AttributeKey.stringKey("rpc.service"); + private static final AttributeKey RPC_METHOD = stringKey("rpc.method"); + private static final AttributeKey RPC_SERVICE = stringKey("rpc.service"); @Override public Attributes generateMetricAttributesFromSpan(SpanData span, Resource resource) { @@ -307,7 +304,7 @@ private static String generateRemoteOperation(SpanData span) { remoteOperation = extractApiPathValue(url.getPath()); } } catch (MalformedURLException e) { - logger.log(Level.FINEST, "invalid http.url attribute: ", httpUrl); + logger.log(FINEST, "invalid http.url attribute: ", httpUrl); } } if (isKeyPresent(span, HTTP_METHOD)) { @@ -387,6 +384,6 @@ private static void logUnknownAttribute(AttributeKey attributeKey, SpanD String[] params = { attributeKey.getKey(), span.getKind().name(), span.getSpanContext().getSpanId() }; - logger.log(Level.FINEST, "No valid {0} value found for {1} span {2}", params); + logger.log(FINEST, "No valid {0} value found for {1} span {2}", params); } } diff --git a/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/AwsSpanMetricsProcessor.java b/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/AwsSpanMetricsProcessor.java index 244138a47..69ca18476 100644 --- a/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/AwsSpanMetricsProcessor.java +++ b/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/AwsSpanMetricsProcessor.java @@ -5,6 +5,8 @@ package io.opentelemetry.contrib.awsxray; +import static io.opentelemetry.api.common.AttributeKey.longKey; + import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.common.Attributes; import io.opentelemetry.api.metrics.DoubleHistogram; @@ -40,8 +42,7 @@ @Immutable public final class AwsSpanMetricsProcessor implements SpanProcessor { - private static final AttributeKey HTTP_STATUS_CODE = - AttributeKey.longKey("http.status_code"); + private static final AttributeKey HTTP_STATUS_CODE = longKey("http.status_code"); 
private static final double NANOS_TO_MILLIS = 1_000_000.0; @@ -152,16 +153,16 @@ private static Long getAwsStatusCode(SpanData spanData) { Throwable throwable = exceptionEvent.getException(); try { - Method method = throwable.getClass().getMethod("getStatusCode", new Class[] {}); - Object code = method.invoke(throwable, new Object[] {}); + Method method = throwable.getClass().getMethod("getStatusCode"); + Object code = method.invoke(throwable); return Long.valueOf((Integer) code); } catch (Exception e) { // Take no action } try { - Method method = throwable.getClass().getMethod("statusCode", new Class[] {}); - Object code = method.invoke(throwable, new Object[] {}); + Method method = throwable.getClass().getMethod("statusCode"); + Object code = method.invoke(throwable); return Long.valueOf((Integer) code); } catch (Exception e) { // Take no action diff --git a/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/AwsXrayRemoteSampler.java b/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/AwsXrayRemoteSampler.java index ad9b72a2c..9c997f042 100644 --- a/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/AwsXrayRemoteSampler.java +++ b/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/AwsXrayRemoteSampler.java @@ -5,6 +5,14 @@ package io.opentelemetry.contrib.awsxray; +import static java.util.concurrent.TimeUnit.NANOSECONDS; +import static java.util.concurrent.TimeUnit.SECONDS; +import static java.util.function.Function.identity; +import static java.util.logging.Level.FINE; +import static java.util.stream.Collectors.toList; +import static java.util.stream.Collectors.toMap; +import static java.util.stream.Collectors.toSet; + import io.opentelemetry.api.common.Attributes; import io.opentelemetry.api.trace.SpanKind; import io.opentelemetry.context.Context; @@ -29,17 +37,13 @@ import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.ThreadLocalRandom; -import 
java.util.concurrent.TimeUnit; -import java.util.function.Function; -import java.util.logging.Level; import java.util.logging.Logger; -import java.util.stream.Collectors; import javax.annotation.Nullable; /** Remote sampler that gets sampling configuration from AWS X-Ray. */ public final class AwsXrayRemoteSampler implements Sampler, Closeable { - static final long DEFAULT_TARGET_INTERVAL_NANOS = TimeUnit.SECONDS.toNanos(10); + static final long DEFAULT_TARGET_INTERVAL_NANOS = SECONDS.toNanos(10); private static final Logger logger = Logger.getLogger(AwsXrayRemoteSampler.class.getName()); @@ -134,7 +138,7 @@ private void getAndUpdateSampler() { initialSampler, response.getSamplingRules().stream() .map(SamplingRuleRecord::getRule) - .collect(Collectors.toList()))); + .collect(toList()))); previousRulesResponse = response; ScheduledFuture existingFetchTargetsFuture = fetchTargetsFuture; @@ -142,18 +146,17 @@ private void getAndUpdateSampler() { existingFetchTargetsFuture.cancel(false); } fetchTargetsFuture = - executor.schedule( - this::fetchTargets, DEFAULT_TARGET_INTERVAL_NANOS, TimeUnit.NANOSECONDS); + executor.schedule(this::fetchTargets, DEFAULT_TARGET_INTERVAL_NANOS, NANOSECONDS); } } catch (Throwable t) { - logger.log(Level.FINE, "Failed to update sampler", t); + logger.log(FINE, "Failed to update sampler", t); } scheduleSamplerUpdate(); } private void scheduleSamplerUpdate() { long delay = pollingIntervalNanos + jitterNanos.next(); - pollFuture = executor.schedule(this::getAndUpdateSampler, delay, TimeUnit.NANOSECONDS); + pollFuture = executor.schedule(this::getAndUpdateSampler, delay, NANOSECONDS); } /** @@ -168,7 +171,7 @@ Duration getNextSamplerUpdateScheduledDuration() { if (pollFuture == null) { return null; } - return Duration.ofNanos(pollFuture.getDelay(TimeUnit.NANOSECONDS)); + return Duration.ofNanos(pollFuture.getDelay(NANOSECONDS)); } private void fetchTargets() { @@ -181,28 +184,25 @@ private void fetchTargets() { Date now = 
Date.from(Instant.ofEpochSecond(0, clock.now())); List statistics = xrayRulesSampler.snapshot(now); Set requestedTargetRuleNames = - statistics.stream() - .map(SamplingStatisticsDocument::getRuleName) - .collect(Collectors.toSet()); + statistics.stream().map(SamplingStatisticsDocument::getRuleName).collect(toSet()); GetSamplingTargetsResponse response = client.getSamplingTargets(GetSamplingTargetsRequest.create(statistics)); Map targets = response.getDocuments().stream() - .collect(Collectors.toMap(SamplingTargetDocument::getRuleName, Function.identity())); + .collect(toMap(SamplingTargetDocument::getRuleName, identity())); updateInternalSamplers(xrayRulesSampler.withTargets(targets, requestedTargetRuleNames, now)); } catch (Throwable t) { // Might be a transient API failure, try again after a default interval. fetchTargetsFuture = - executor.schedule( - this::fetchTargets, DEFAULT_TARGET_INTERVAL_NANOS, TimeUnit.NANOSECONDS); + executor.schedule(this::fetchTargets, DEFAULT_TARGET_INTERVAL_NANOS, NANOSECONDS); return; } long nextTargetFetchIntervalNanos = xrayRulesSampler.nextTargetFetchTimeNanos() - clock.nanoTime(); fetchTargetsFuture = - executor.schedule(this::fetchTargets, nextTargetFetchIntervalNanos, TimeUnit.NANOSECONDS); + executor.schedule(this::fetchTargets, nextTargetFetchIntervalNanos, NANOSECONDS); } @Override diff --git a/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/AwsXrayRemoteSamplerBuilder.java b/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/AwsXrayRemoteSamplerBuilder.java index 1ce0d41c1..25485e4b0 100644 --- a/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/AwsXrayRemoteSamplerBuilder.java +++ b/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/AwsXrayRemoteSamplerBuilder.java @@ -6,6 +6,8 @@ package io.opentelemetry.contrib.awsxray; import static java.util.Objects.requireNonNull; +import static java.util.concurrent.TimeUnit.NANOSECONDS; +import static java.util.concurrent.TimeUnit.SECONDS; import 
com.google.errorprone.annotations.CanIgnoreReturnValue; import io.opentelemetry.sdk.common.Clock; @@ -26,7 +28,7 @@ public final class AwsXrayRemoteSamplerBuilder { private Clock clock = Clock.getDefault(); private String endpoint = DEFAULT_ENDPOINT; @Nullable private Sampler initialSampler; - private long pollingIntervalNanos = TimeUnit.SECONDS.toNanos(DEFAULT_POLLING_INTERVAL_SECS); + private long pollingIntervalNanos = SECONDS.toNanos(DEFAULT_POLLING_INTERVAL_SECS); AwsXrayRemoteSamplerBuilder(Resource resource) { this.resource = resource; @@ -51,7 +53,7 @@ public AwsXrayRemoteSamplerBuilder setEndpoint(String endpoint) { @CanIgnoreReturnValue public AwsXrayRemoteSamplerBuilder setPollingInterval(Duration delay) { requireNonNull(delay, "delay"); - return setPollingInterval(delay.toNanos(), TimeUnit.NANOSECONDS); + return setPollingInterval(delay.toNanos(), NANOSECONDS); } /** diff --git a/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/SamplingRuleApplier.java b/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/SamplingRuleApplier.java index 6387aa0d7..ae4cac018 100644 --- a/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/SamplingRuleApplier.java +++ b/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/SamplingRuleApplier.java @@ -5,7 +5,10 @@ package io.opentelemetry.contrib.awsxray; +import static io.opentelemetry.api.common.AttributeKey.stringKey; import static io.opentelemetry.semconv.ServiceAttributes.SERVICE_NAME; +import static java.util.concurrent.TimeUnit.SECONDS; +import static java.util.stream.Collectors.toMap; import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.common.Attributes; @@ -19,28 +22,27 @@ import io.opentelemetry.sdk.trace.samplers.Sampler; import io.opentelemetry.sdk.trace.samplers.SamplingDecision; import io.opentelemetry.sdk.trace.samplers.SamplingResult; +import io.opentelemetry.semconv.HttpAttributes; +import io.opentelemetry.semconv.ServerAttributes; +import 
io.opentelemetry.semconv.UrlAttributes; import java.time.Duration; import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.LongAdder; import java.util.regex.Pattern; -import java.util.stream.Collectors; import javax.annotation.Nullable; final class SamplingRuleApplier { // copied from AwsIncubatingAttributes private static final AttributeKey AWS_ECS_CONTAINER_ARN = - AttributeKey.stringKey("aws.ecs.container.arn"); + stringKey("aws.ecs.container.arn"); // copied from CloudIncubatingAttributes - private static final AttributeKey CLOUD_PLATFORM = - AttributeKey.stringKey("cloud.platform"); - private static final AttributeKey CLOUD_RESOURCE_ID = - AttributeKey.stringKey("cloud.resource_id"); + private static final AttributeKey CLOUD_PLATFORM = stringKey("cloud.platform"); + private static final AttributeKey CLOUD_RESOURCE_ID = stringKey("cloud.resource_id"); // copied from CloudIncubatingAttributes.CloudPlatformIncubatingValues public static final String AWS_EC2 = "aws_ec2"; public static final String AWS_ECS = "aws_ecs"; @@ -48,15 +50,19 @@ final class SamplingRuleApplier { public static final String AWS_LAMBDA = "aws_lambda"; public static final String AWS_ELASTIC_BEANSTALK = "aws_elastic_beanstalk"; // copied from HttpIncubatingAttributes - private static final AttributeKey HTTP_HOST = AttributeKey.stringKey("http.host"); - private static final AttributeKey HTTP_METHOD = AttributeKey.stringKey("http.method"); - private static final AttributeKey HTTP_TARGET = AttributeKey.stringKey("http.target"); - private static final AttributeKey HTTP_URL = AttributeKey.stringKey("http.url"); + private static final AttributeKey HTTP_HOST = stringKey("http.host"); + private static final AttributeKey HTTP_METHOD = stringKey("http.method"); + private static final AttributeKey HTTP_TARGET = stringKey("http.target"); + private static final 
AttributeKey HTTP_URL = stringKey("http.url"); // copied from NetIncubatingAttributes - private static final AttributeKey NET_HOST_NAME = AttributeKey.stringKey("net.host.name"); + private static final AttributeKey NET_HOST_NAME = stringKey("net.host.name"); private static final Map XRAY_CLOUD_PLATFORM; + // _OTHER request method: + // https://github.com/open-telemetry/semantic-conventions/blob/main/docs/registry/attributes/http.md?plain=1#L96 + private static final String _OTHER_REQUEST_METHOD = "_OTHER"; + static { Map xrayCloudPlatform = new HashMap<>(); xrayCloudPlatform.put(AWS_EC2, "AWS::EC2::Instance"); @@ -124,7 +130,7 @@ final class SamplingRuleApplier { } else { attributeMatchers = rule.getAttributes().entrySet().stream() - .collect(Collectors.toMap(Map.Entry::getKey, e -> toMatcher(e.getValue()))); + .collect(toMap(Map.Entry::getKey, e -> toMatcher(e.getValue()))); } urlPathMatcher = toMatcher(rule.getUrlPath()); @@ -175,25 +181,35 @@ private SamplingRuleApplier( @SuppressWarnings("deprecation") // TODO boolean matches(Attributes attributes, Resource resource) { int matchedAttributes = 0; - String httpTarget = null; - String httpUrl = null; - String httpMethod = null; - String host = null; - for (Map.Entry, Object> entry : attributes.asMap().entrySet()) { - if (entry.getKey().equals(HTTP_TARGET)) { - httpTarget = (String) entry.getValue(); - } else if (entry.getKey().equals(HTTP_URL)) { - httpUrl = (String) entry.getValue(); - } else if (entry.getKey().equals(HTTP_METHOD)) { - httpMethod = (String) entry.getValue(); - } else if (entry.getKey().equals(NET_HOST_NAME)) { - host = (String) entry.getValue(); - } else if (entry.getKey().equals(HTTP_HOST)) { - // TODO (trask) remove support for deprecated http.host attribute - host = (String) entry.getValue(); + String httpTarget = attributes.get(UrlAttributes.URL_PATH); + if (httpTarget == null) { + httpTarget = attributes.get(HTTP_TARGET); + } + + String httpUrl = 
attributes.get(UrlAttributes.URL_FULL); + if (httpUrl == null) { + httpUrl = attributes.get(HTTP_URL); + } + + String httpMethod = attributes.get(HttpAttributes.HTTP_REQUEST_METHOD); + if (httpMethod == null) { + httpMethod = attributes.get(HTTP_METHOD); + } + + if (httpMethod != null && httpMethod.equals(_OTHER_REQUEST_METHOD)) { + httpMethod = attributes.get(HttpAttributes.HTTP_REQUEST_METHOD_ORIGINAL); + } + + String host = attributes.get(ServerAttributes.SERVER_ADDRESS); + if (host == null) { + host = attributes.get(NET_HOST_NAME); + if (host == null) { + host = attributes.get(HTTP_HOST); } + } + for (Map.Entry, Object> entry : attributes.asMap().entrySet()) { Matcher matcher = attributeMatchers.get(entry.getKey().getKey()); if (matcher == null) { continue; @@ -300,7 +316,7 @@ SamplingRuleApplier withTarget(SamplingTargetDocument target, Date now) { } long intervalNanos = target.getIntervalSecs() != null - ? TimeUnit.SECONDS.toNanos(target.getIntervalSecs()) + ? SECONDS.toNanos(target.getIntervalSecs()) : AwsXrayRemoteSampler.DEFAULT_TARGET_INTERVAL_NANOS; long newNextSnapshotTimeNanos = clock.nanoTime() + intervalNanos; diff --git a/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/XrayRulesSampler.java b/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/XrayRulesSampler.java index 75977dc0f..e187da972 100644 --- a/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/XrayRulesSampler.java +++ b/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/XrayRulesSampler.java @@ -5,6 +5,9 @@ package io.opentelemetry.contrib.awsxray; +import static java.util.logging.Level.FINE; +import static java.util.stream.Collectors.toList; + import io.opentelemetry.api.common.Attributes; import io.opentelemetry.api.trace.SpanKind; import io.opentelemetry.context.Context; @@ -21,9 +24,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; -import java.util.logging.Level; import java.util.logging.Logger; -import java.util.stream.Collectors; 
final class XrayRulesSampler implements Sampler { @@ -84,7 +85,7 @@ public SamplingResult shouldSample( // In practice, X-Ray always returns a Default rule that matches all requests so it is a bug in // our code or X-Ray to reach here, fallback just in case. logger.log( - Level.FINE, + FINE, "No sampling rule matched the request. " + "This is a bug in either the OpenTelemetry SDK or X-Ray."); return fallbackSampler.shouldSample( @@ -100,7 +101,7 @@ List snapshot(Date now) { return Arrays.stream(ruleAppliers) .map(rule -> rule.snapshot(now)) .filter(Objects::nonNull) - .collect(Collectors.toList()); + .collect(toList()); } long nextTargetFetchTimeNanos() { diff --git a/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/XraySamplerClient.java b/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/XraySamplerClient.java index 5dbbbbbbf..84dbd0144 100644 --- a/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/XraySamplerClient.java +++ b/aws-xray/src/main/java/io/opentelemetry/contrib/awsxray/XraySamplerClient.java @@ -25,6 +25,8 @@ package io.opentelemetry.contrib.awsxray; +import static java.util.logging.Level.FINE; + import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.JsonProcessingException; @@ -37,7 +39,6 @@ import java.io.UncheckedIOException; import java.math.BigDecimal; import java.util.Date; -import java.util.logging.Level; import java.util.logging.Logger; import okhttp3.Call; import okhttp3.MediaType; @@ -51,7 +52,7 @@ final class XraySamplerClient { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper() - .setSerializationInclusion(JsonInclude.Include.NON_EMPTY) + .setDefaultPropertyInclusion(JsonInclude.Include.NON_EMPTY) // AWS APIs return timestamps as floats. 
.registerModule( new SimpleModule().addDeserializer(Date.class, new FloatDateDeserializer())) @@ -114,7 +115,7 @@ private T executeJsonRequest(String endpoint, Object request, Class respo private static String readResponse(Response response, String endpoint) throws IOException { if (!response.isSuccessful()) { logger.log( - Level.FINE, + FINE, "Error response from " + endpoint + " code (" diff --git a/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/AttributePropagatingSpanProcessorTest.java b/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/AttributePropagatingSpanProcessorTest.java index b4f40e408..0bf394e0e 100644 --- a/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/AttributePropagatingSpanProcessorTest.java +++ b/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/AttributePropagatingSpanProcessorTest.java @@ -41,7 +41,7 @@ public void setup() { } @Test - public void testAttributesPropagation() { + void testAttributesPropagation() { Span spanWithAppOnly = tracer.spanBuilder("parent").startSpan(); spanWithAppOnly.setAttribute(testKey1, "testValue1"); validateSpanAttributesInheritance(spanWithAppOnly, null, "testValue1", null); @@ -57,7 +57,7 @@ public void testAttributesPropagation() { } @Test - public void testOverrideAttributes() { + void testOverrideAttributes() { Span parentSpan = tracer.spanBuilder("parent").startSpan(); parentSpan.setAttribute(testKey1, "testValue1"); parentSpan.setAttribute(testKey2, "testValue2"); @@ -75,13 +75,13 @@ public void testOverrideAttributes() { } @Test - public void testAttributesDoNotExist() { + void testAttributesDoNotExist() { Span span = tracer.spanBuilder("parent").startSpan(); validateSpanAttributesInheritance(span, null, null, null); } @Test - public void testSpanNamePropagationBySpanKind() { + void testSpanNamePropagationBySpanKind() { for (SpanKind value : SpanKind.values()) { Span span = tracer.spanBuilder("parent").setSpanKind(value).startSpan(); if (value == SpanKind.SERVER || value == 
SpanKind.CONSUMER) { @@ -93,7 +93,7 @@ public void testSpanNamePropagationBySpanKind() { } @Test - public void testSpanNamePropagationWithRemoteParentSpan() { + void testSpanNamePropagationWithRemoteParentSpan() { Span remoteParent = Span.wrap( SpanContext.createFromRemoteParent( diff --git a/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/AwsMetricAttributeGeneratorTest.java b/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/AwsMetricAttributeGeneratorTest.java index 135a1eeff..4d38e89ff 100644 --- a/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/AwsMetricAttributeGeneratorTest.java +++ b/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/AwsMetricAttributeGeneratorTest.java @@ -79,7 +79,7 @@ public void setUpMocks() { } @Test - public void testConsumerSpanWithoutAttributes() { + void testConsumerSpanWithoutAttributes() { Attributes expectedAttributes = Attributes.of( AWS_SPAN_KIND, SpanKind.CONSUMER.name(), @@ -89,7 +89,7 @@ public void testConsumerSpanWithoutAttributes() { } @Test - public void testServerSpanWithoutAttributes() { + void testServerSpanWithoutAttributes() { Attributes expectedAttributes = Attributes.of( AWS_SPAN_KIND, SpanKind.SERVER.name(), @@ -99,7 +99,7 @@ public void testServerSpanWithoutAttributes() { } @Test - public void testProducerSpanWithoutAttributes() { + void testProducerSpanWithoutAttributes() { Attributes expectedAttributes = Attributes.of( AWS_SPAN_KIND, SpanKind.PRODUCER.name(), @@ -111,7 +111,7 @@ public void testProducerSpanWithoutAttributes() { } @Test - public void testClientSpanWithoutAttributes() { + void testClientSpanWithoutAttributes() { Attributes expectedAttributes = Attributes.of( AWS_SPAN_KIND, SpanKind.CLIENT.name(), @@ -123,13 +123,13 @@ public void testClientSpanWithoutAttributes() { } @Test - public void testInternalSpan() { + void testInternalSpan() { // Spans with internal span kind should not produce any attributes. 
validateAttributesProducedForSpanOfKind(Attributes.empty(), SpanKind.INTERNAL); } @Test - public void testConsumerSpanWithAttributes() { + void testConsumerSpanWithAttributes() { updateResourceWithServiceName(); when(spanDataMock.getName()).thenReturn(SPAN_NAME_VALUE); @@ -142,7 +142,7 @@ public void testConsumerSpanWithAttributes() { } @Test - public void testServerSpanWithAttributes() { + void testServerSpanWithAttributes() { updateResourceWithServiceName(); when(spanDataMock.getName()).thenReturn(SPAN_NAME_VALUE); @@ -155,7 +155,7 @@ public void testServerSpanWithAttributes() { } @Test - public void testServerSpanWithNullSpanName() { + void testServerSpanWithNullSpanName() { updateResourceWithServiceName(); when(spanDataMock.getName()).thenReturn(null); @@ -168,7 +168,7 @@ public void testServerSpanWithNullSpanName() { } @Test - public void testServerSpanWithSpanNameAsHttpMethod() { + void testServerSpanWithSpanNameAsHttpMethod() { updateResourceWithServiceName(); when(spanDataMock.getName()).thenReturn("GET"); mockAttribute(HTTP_METHOD, "GET"); @@ -183,7 +183,7 @@ public void testServerSpanWithSpanNameAsHttpMethod() { } @Test - public void testServerSpanWithSpanNameWithHttpTarget() { + void testServerSpanWithSpanNameWithHttpTarget() { updateResourceWithServiceName(); when(spanDataMock.getName()).thenReturn("POST"); mockAttribute(HTTP_METHOD, "POST"); @@ -203,7 +203,7 @@ public void testServerSpanWithSpanNameWithHttpTarget() { } @Test - public void testProducerSpanWithAttributes() { + void testProducerSpanWithAttributes() { updateResourceWithServiceName(); mockAttribute(AWS_LOCAL_OPERATION, AWS_LOCAL_OPERATION_VALUE); mockAttribute(AWS_REMOTE_SERVICE, AWS_REMOTE_SERVICE_VALUE); @@ -220,7 +220,7 @@ public void testProducerSpanWithAttributes() { } @Test - public void testClientSpanWithAttributes() { + void testClientSpanWithAttributes() { updateResourceWithServiceName(); mockAttribute(AWS_LOCAL_OPERATION, AWS_LOCAL_OPERATION_VALUE); 
mockAttribute(AWS_REMOTE_SERVICE, AWS_REMOTE_SERVICE_VALUE); @@ -237,7 +237,7 @@ public void testClientSpanWithAttributes() { } @Test - public void testRemoteAttributesCombinations() { + void testRemoteAttributesCombinations() { // Set all expected fields to a test string, we will overwrite them in descending order to test // the priority-order logic in AwsMetricAttributeGenerator remote attribute methods. mockAttribute(AWS_REMOTE_SERVICE, "TestString"); @@ -333,7 +333,7 @@ public void testRemoteAttributesCombinations() { } @Test - public void testPeerServiceDoesOverrideOtherRemoteServices() { + void testPeerServiceDoesOverrideOtherRemoteServices() { validatePeerServiceDoesOverride(RPC_SERVICE); validatePeerServiceDoesOverride(DB_SYSTEM); validatePeerServiceDoesOverride(FAAS_INVOKED_PROVIDER); @@ -346,7 +346,7 @@ public void testPeerServiceDoesOverrideOtherRemoteServices() { } @Test - public void testPeerServiceDoesNotOverrideAwsRemoteService() { + void testPeerServiceDoesNotOverrideAwsRemoteService() { mockAttribute(AWS_REMOTE_SERVICE, "TestString"); mockAttribute(PEER_SERVICE, "PeerService"); @@ -357,7 +357,7 @@ public void testPeerServiceDoesNotOverrideAwsRemoteService() { } @Test - public void testClientSpanWithRemoteTargetAttributes() { + void testClientSpanWithRemoteTargetAttributes() { // Validate behaviour of aws bucket name attribute, then remove it. 
mockAttribute(AWS_BUCKET_NAME, "aws_s3_bucket_name"); validateRemoteTargetAttributes(AWS_REMOTE_TARGET, "aws_s3_bucket_name"); diff --git a/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/AwsMetricAttributesSpanExporterTest.java b/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/AwsMetricAttributesSpanExporterTest.java index aedf5fa06..8502734d7 100644 --- a/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/AwsMetricAttributesSpanExporterTest.java +++ b/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/AwsMetricAttributesSpanExporterTest.java @@ -62,7 +62,7 @@ public void setUpMocks() { } @Test - public void testPassthroughDelegations() { + void testPassthroughDelegations() { awsMetricAttributesSpanExporter.flush(); awsMetricAttributesSpanExporter.shutdown(); awsMetricAttributesSpanExporter.close(); @@ -72,7 +72,7 @@ public void testPassthroughDelegations() { } @Test - public void testExportDelegationWithoutAttributeOrModification() { + void testExportDelegationWithoutAttributeOrModification() { Attributes spanAttributes = buildSpanAttributes(CONTAINS_NO_ATTRIBUTES); SpanData spanDataMock = buildSpanDataMock(spanAttributes); Attributes metricAttributes = buildMetricAttributes(CONTAINS_NO_ATTRIBUTES); @@ -88,7 +88,7 @@ public void testExportDelegationWithoutAttributeOrModification() { } @Test - public void testExportDelegationWithAttributeButWithoutModification() { + void testExportDelegationWithAttributeButWithoutModification() { Attributes spanAttributes = buildSpanAttributes(CONTAINS_ATTRIBUTES); SpanData spanDataMock = buildSpanDataMock(spanAttributes); Attributes metricAttributes = buildMetricAttributes(CONTAINS_NO_ATTRIBUTES); @@ -104,7 +104,7 @@ public void testExportDelegationWithAttributeButWithoutModification() { } @Test - public void testExportDelegationWithoutAttributeButWithModification() { + void testExportDelegationWithoutAttributeButWithModification() { Attributes spanAttributes = 
buildSpanAttributes(CONTAINS_NO_ATTRIBUTES); SpanData spanDataMock = buildSpanDataMock(spanAttributes); Attributes metricAttributes = buildMetricAttributes(CONTAINS_ATTRIBUTES); @@ -124,7 +124,7 @@ public void testExportDelegationWithoutAttributeButWithModification() { } @Test - public void testExportDelegationWithAttributeAndModification() { + void testExportDelegationWithAttributeAndModification() { Attributes spanAttributes = buildSpanAttributes(CONTAINS_ATTRIBUTES); SpanData spanDataMock = buildSpanDataMock(spanAttributes); Attributes metricAttributes = buildMetricAttributes(CONTAINS_ATTRIBUTES); @@ -146,7 +146,7 @@ public void testExportDelegationWithAttributeAndModification() { } @Test - public void testExportDelegationWithMultipleSpans() { + void testExportDelegationWithMultipleSpans() { Attributes spanAttributes1 = buildSpanAttributes(CONTAINS_NO_ATTRIBUTES); SpanData spanDataMock1 = buildSpanDataMock(spanAttributes1); Attributes metricAttributes1 = buildMetricAttributes(CONTAINS_NO_ATTRIBUTES); @@ -185,7 +185,7 @@ public void testExportDelegationWithMultipleSpans() { } @Test - public void testOverridenAttributes() { + void testOverridenAttributes() { Attributes spanAttributes = Attributes.of( AttributeKey.stringKey("key1"), @@ -217,7 +217,7 @@ public void testOverridenAttributes() { } @Test - public void testExportDelegatingSpanDataBehaviour() { + void testExportDelegatingSpanDataBehaviour() { Attributes spanAttributes = buildSpanAttributes(CONTAINS_ATTRIBUTES); SpanData spanDataMock = buildSpanDataMock(spanAttributes); Attributes metricAttributes = buildMetricAttributes(CONTAINS_ATTRIBUTES); diff --git a/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/AwsSpanMetricsProcessorTest.java b/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/AwsSpanMetricsProcessorTest.java index 0836f5a8e..5d10a6a3d 100644 --- a/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/AwsSpanMetricsProcessorTest.java +++ 
b/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/AwsSpanMetricsProcessorTest.java @@ -101,13 +101,13 @@ public void setUpMocks() { } @Test - public void testIsRequired() { + void testIsRequired() { assertThat(awsSpanMetricsProcessor.isStartRequired()).isFalse(); assertThat(awsSpanMetricsProcessor.isEndRequired()).isTrue(); } @Test - public void testStartDoesNothingToSpan() { + void testStartDoesNothingToSpan() { Context parentContextMock = mock(Context.class); ReadWriteSpan spanMock = mock(ReadWriteSpan.class); awsSpanMetricsProcessor.onStart(parentContextMock, spanMock); @@ -115,7 +115,7 @@ public void testStartDoesNothingToSpan() { } @Test - public void testTearDown() { + void testTearDown() { assertThat(awsSpanMetricsProcessor.shutdown()).isEqualTo(CompletableResultCode.ofSuccess()); assertThat(awsSpanMetricsProcessor.forceFlush()).isEqualTo(CompletableResultCode.ofSuccess()); @@ -128,7 +128,7 @@ public void testTearDown() { * AwsSpanMetricsProcessor's onEnd method pertaining to metrics generation. 
*/ @Test - public void testOnEndMetricsGenerationWithoutSpanAttributes() { + void testOnEndMetricsGenerationWithoutSpanAttributes() { Attributes spanAttributes = buildSpanAttributes(CONTAINS_NO_ATTRIBUTES); ReadableSpan readableSpanMock = buildReadableSpanMock(spanAttributes); Attributes metricAttributes = buildMetricAttributes(CONTAINS_ATTRIBUTES); @@ -141,7 +141,7 @@ public void testOnEndMetricsGenerationWithoutSpanAttributes() { } @Test - public void testOnEndMetricsGenerationWithoutMetricAttributes() { + void testOnEndMetricsGenerationWithoutMetricAttributes() { Attributes spanAttributes = Attributes.of(HTTP_STATUS_CODE, 500L); ReadableSpan readableSpanMock = buildReadableSpanMock(spanAttributes); Attributes metricAttributes = buildMetricAttributes(CONTAINS_NO_ATTRIBUTES); @@ -154,7 +154,7 @@ public void testOnEndMetricsGenerationWithoutMetricAttributes() { } @Test - public void testOnEndMetricsGenerationWithoutEndRequired() { + void testOnEndMetricsGenerationWithoutEndRequired() { Attributes spanAttributes = Attributes.of(HTTP_STATUS_CODE, 500L); ReadableSpan readableSpanMock = buildReadableSpanMock(spanAttributes); Attributes metricAttributes = buildMetricAttributes(CONTAINS_ATTRIBUTES); @@ -167,7 +167,7 @@ public void testOnEndMetricsGenerationWithoutEndRequired() { } @Test - public void testOnEndMetricsGenerationWithLatency() { + void testOnEndMetricsGenerationWithLatency() { Attributes spanAttributes = Attributes.of(HTTP_STATUS_CODE, 200L); ReadableSpan readableSpanMock = buildReadableSpanMock(spanAttributes); Attributes metricAttributes = buildMetricAttributes(CONTAINS_ATTRIBUTES); @@ -182,7 +182,7 @@ public void testOnEndMetricsGenerationWithLatency() { } @Test - public void testOnEndMetricsGenerationWithAwsStatusCodes() { + void testOnEndMetricsGenerationWithAwsStatusCodes() { validateMetricsGeneratedForAwsStatusCode(399L, ExpectedStatusMetric.NEITHER); validateMetricsGeneratedForAwsStatusCode(400L, ExpectedStatusMetric.ERROR); 
validateMetricsGeneratedForAwsStatusCode(499L, ExpectedStatusMetric.ERROR); @@ -192,7 +192,7 @@ public void testOnEndMetricsGenerationWithAwsStatusCodes() { } @Test - public void testOnEndMetricsGenerationWithStatusCodes() { + void testOnEndMetricsGenerationWithStatusCodes() { // Invalid HTTP status codes validateMetricsGeneratedForHttpStatusCode(null, ExpectedStatusMetric.NEITHER); diff --git a/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/AwsXrayRemoteSamplerTest.java b/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/AwsXrayRemoteSamplerTest.java index 4e5cd13bc..d45e00ad2 100644 --- a/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/AwsXrayRemoteSamplerTest.java +++ b/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/AwsXrayRemoteSamplerTest.java @@ -196,7 +196,6 @@ void testJitterTruncation() { .setEndpoint(server.httpUri().toString()) .setPollingInterval(Duration.ofMinutes(5)) .build()) { - assertThat(samplerWithLongerPollingInterval.getNextSamplerUpdateScheduledDuration()).isNull(); await() .untilAsserted( () -> { diff --git a/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/ResourceHolderTest.java b/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/ResourceHolderTest.java index 1140abc5f..f81f3d370 100644 --- a/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/ResourceHolderTest.java +++ b/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/ResourceHolderTest.java @@ -21,11 +21,11 @@ * Unit tests for {@link ResourceHolder}. Note that there isn't a great way to test the "default" * fallback logic, as when the test suite is run, the customize logic appears to be invoked. 
*/ -public class ResourceHolderTest { +class ResourceHolderTest { @Test @SuppressWarnings("unchecked") - public void testCustomized() { + void testCustomized() { Resource customizedResource = Resource.create(Attributes.empty()); AutoConfigurationCustomizer mockCustomizer = mock(AutoConfigurationCustomizer.class); ResourceHolder resourceHolder = new ResourceHolder(); diff --git a/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/SamplingRuleApplierTest.java b/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/SamplingRuleApplierTest.java index bc7bdd3e7..920a5ffd4 100644 --- a/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/SamplingRuleApplierTest.java +++ b/aws-xray/src/test/java/io/opentelemetry/contrib/awsxray/SamplingRuleApplierTest.java @@ -29,6 +29,9 @@ import io.opentelemetry.sdk.testing.time.TestClock; import io.opentelemetry.sdk.trace.samplers.SamplingDecision; import io.opentelemetry.sdk.trace.samplers.SamplingResult; +import io.opentelemetry.semconv.HttpAttributes; +import io.opentelemetry.semconv.ServerAttributes; +import io.opentelemetry.semconv.UrlAttributes; import io.opentelemetry.semconv.incubating.CloudIncubatingAttributes; import java.io.IOException; import java.io.UncheckedIOException; @@ -72,6 +75,15 @@ class ExactMatch { .put(AttributeKey.longKey("speed"), 10) .build(); + private final Attributes stableSemConvAttributes = + Attributes.builder() + .put(HttpAttributes.HTTP_REQUEST_METHOD, "GET") + .put(ServerAttributes.SERVER_ADDRESS, "opentelemetry.io") + .put(UrlAttributes.URL_PATH, "/instrument-me") + .put(AttributeKey.stringKey("animal"), "cat") + .put(AttributeKey.longKey("speed"), 10) + .build(); + // FixedRate set to 1.0 in rule and no reservoir @Test void fixedRateAlwaysSample() { @@ -120,6 +132,21 @@ void matches() { .isTrue(); } + @Test + void matchesURLFullStableSemConv() { + assertThat(applier.matches(stableSemConvAttributes, resource)).isTrue(); + + // url.full works too + assertThat( + applier.matches( + 
attributes.toBuilder() + .remove(HTTP_TARGET) + .put(UrlAttributes.URL_FULL, "scheme://host:port/instrument-me") + .build(), + resource)) + .isTrue(); + } + @Test void serviceNameNotMatch() { assertThat( @@ -140,6 +167,15 @@ void methodNotMatch() { assertThat(applier.matches(attributes, resource)).isFalse(); } + @Test + void methodStableSemConvNotMatch() { + Attributes attributes = + this.stableSemConvAttributes.toBuilder() + .put(HttpAttributes.HTTP_REQUEST_METHOD, "POST") + .build(); + assertThat(applier.matches(attributes, resource)).isFalse(); + } + @Test void hostNotMatch() { // Replacing dot with character makes sure we're not accidentally treating dot as regex @@ -177,6 +213,36 @@ void pathNotMatch() { assertThat(applier.matches(attributes, resource)).isFalse(); } + @Test + void pathStableSemConvNotMatch() { + Attributes attributes = + this.stableSemConvAttributes.toBuilder() + .put(UrlAttributes.URL_PATH, "/instrument-you") + .build(); + assertThat(applier.matches(attributes, resource)).isFalse(); + attributes = + this.stableSemConvAttributes.toBuilder() + .remove(UrlAttributes.URL_PATH) + .put(UrlAttributes.URL_FULL, "scheme://host:port/instrument-you") + .build(); + assertThat(applier.matches(attributes, resource)).isFalse(); + attributes = + this.stableSemConvAttributes.toBuilder() + .remove(UrlAttributes.URL_PATH) + .put(UrlAttributes.URL_FULL, "scheme://host:port") + .build(); + assertThat(applier.matches(attributes, resource)).isFalse(); + + // Correct path, but we ignore anyways since the URL is malformed per spec, scheme is always + // present. 
+ attributes = + this.stableSemConvAttributes.toBuilder() + .remove(UrlAttributes.URL_PATH) + .put(UrlAttributes.URL_FULL, "host:port/instrument-me") + .build(); + assertThat(applier.matches(attributes, resource)).isFalse(); + } + @Test void attributeNotMatch() { Attributes attributes = @@ -235,6 +301,15 @@ class WildcardMatch { .put(AttributeKey.longKey("speed"), 10) .build(); + private final Attributes stableSemConvAttributes = + Attributes.builder() + .put(HttpAttributes.HTTP_REQUEST_METHOD, "GET") + .put(ServerAttributes.SERVER_ADDRESS, "opentelemetry.io") + .put(UrlAttributes.URL_PATH, "/instrument-me?foo=bar&cat=meow") + .put(AttributeKey.stringKey("animal"), "cat") + .put(AttributeKey.longKey("speed"), 10) + .build(); + // FixedRate set to 0.0 in rule and no reservoir @Test void fixedRateNeverSample() { @@ -317,6 +392,36 @@ void methodNotMatch() { assertThat(applier.matches(attributes, resource)).isFalse(); } + @Test + void stableSemConvMethodMatches() { + Attributes attributes = + this.stableSemConvAttributes.toBuilder() + .put(HttpAttributes.HTTP_REQUEST_METHOD, "BADGETGOOD") + .build(); + assertThat(applier.matches(attributes, resource)).isTrue(); + attributes = + stableSemConvAttributes.toBuilder() + .put(HttpAttributes.HTTP_REQUEST_METHOD, "BADGET") + .build(); + assertThat(applier.matches(attributes, resource)).isTrue(); + attributes = + stableSemConvAttributes.toBuilder() + .put(HttpAttributes.HTTP_REQUEST_METHOD, "GETGET") + .build(); + assertThat(applier.matches(attributes, resource)).isTrue(); + } + + @Test + void stableSemConvMethodNotMatch() { + Attributes attributes = + stableSemConvAttributes.toBuilder() + .put(HttpAttributes.HTTP_REQUEST_METHOD, "POST") + .build(); + assertThat(applier.matches(attributes, resource)).isFalse(); + attributes = removeAttribute(stableSemConvAttributes, HttpAttributes.HTTP_REQUEST_METHOD); + assertThat(applier.matches(attributes, resource)).isFalse(); + } + @Test void hostMatches() { Attributes attributes = @@ 
-345,6 +450,56 @@ void hostNotMatch() { assertThat(applier.matches(attributes, resource)).isFalse(); } + @Test + void stableSemConvHostMatches() { + Attributes attributes = + this.stableSemConvAttributes.toBuilder() + .put(ServerAttributes.SERVER_ADDRESS, "alpha.opentelemetry.io") + .build(); + assertThat(applier.matches(attributes, resource)).isTrue(); + attributes = + this.stableSemConvAttributes.toBuilder() + .put(ServerAttributes.SERVER_ADDRESS, "opfdnqtelemetry.io") + .build(); + assertThat(applier.matches(attributes, resource)).isTrue(); + attributes = + this.stableSemConvAttributes.toBuilder() + .put(ServerAttributes.SERVER_ADDRESS, "opentglemetry.io") + .build(); + assertThat(applier.matches(attributes, resource)).isTrue(); + attributes = + this.stableSemConvAttributes.toBuilder() + .put(ServerAttributes.SERVER_ADDRESS, "opentglemry.io") + .build(); + assertThat(applier.matches(attributes, resource)).isTrue(); + attributes = + this.stableSemConvAttributes.toBuilder() + .put(ServerAttributes.SERVER_ADDRESS, "opentglemrz.io") + .build(); + assertThat(applier.matches(attributes, resource)).isTrue(); + } + + @Test + void stableSemConvHostNotMatch() { + Attributes attributes = + this.stableSemConvAttributes.toBuilder() + .put(ServerAttributes.SERVER_ADDRESS, "opentelemetryfio") + .build(); + assertThat(applier.matches(attributes, resource)).isFalse(); + attributes = + this.stableSemConvAttributes.toBuilder() + .put(ServerAttributes.SERVER_ADDRESS, "opentgalemetry.io") + .build(); + assertThat(applier.matches(attributes, resource)).isFalse(); + attributes = + this.stableSemConvAttributes.toBuilder() + .put(ServerAttributes.SERVER_ADDRESS, "alpha.oentelemetry.io") + .build(); + assertThat(applier.matches(attributes, resource)).isFalse(); + attributes = removeAttribute(this.stableSemConvAttributes, ServerAttributes.SERVER_ADDRESS); + assertThat(applier.matches(attributes, resource)).isFalse(); + } + @Test void pathMatches() { Attributes attributes = @@ -368,6 
+523,37 @@ void pathNotMatch() { assertThat(applier.matches(attributes, resource)).isFalse(); } + @Test + void pathStableSemConvMatches() { + Attributes attributes = + stableSemConvAttributes.toBuilder() + .put(UrlAttributes.URL_PATH, "/instrument-me?foo=bar&cat=") + .build(); + assertThat(applier.matches(attributes, resource)).isTrue(); + // Deceptive question mark, it's actually a wildcard :-) + attributes = + stableSemConvAttributes.toBuilder() + .put(UrlAttributes.URL_PATH, "/instrument-meafoo=bar&cat=") + .build(); + assertThat(applier.matches(attributes, resource)).isTrue(); + } + + @Test + void pathStableSemConvNotMatch() { + Attributes attributes = + stableSemConvAttributes.toBuilder() + .put(UrlAttributes.URL_PATH, "/instrument-mea?foo=bar&cat=") + .build(); + assertThat(applier.matches(attributes, resource)).isFalse(); + attributes = + stableSemConvAttributes.toBuilder() + .put(UrlAttributes.URL_PATH, "foo/instrument-meafoo=bar&cat=") + .build(); + assertThat(applier.matches(attributes, resource)).isFalse(); + attributes = removeAttribute(stableSemConvAttributes, UrlAttributes.URL_PATH); + assertThat(applier.matches(attributes, resource)).isFalse(); + } + @Test void attributeMatches() { Attributes attributes = diff --git a/azure-resources/build.gradle.kts b/azure-resources/build.gradle.kts index 33af12d84..05c032d70 100644 --- a/azure-resources/build.gradle.kts +++ b/azure-resources/build.gradle.kts @@ -5,8 +5,8 @@ plugins { id("maven-publish") } -description = "OpenTelemetry GCP Resources Support" -otelJava.moduleName.set("io.opentelemetry.contrib.gcp.resource") +description = "OpenTelemetry Azure Resources Support" +otelJava.moduleName.set("io.opentelemetry.contrib.azure.resource") // enable publishing to maven local java { @@ -15,6 +15,7 @@ java { dependencies { api("io.opentelemetry:opentelemetry-api") + compileOnly("io.opentelemetry:opentelemetry-api-incubator") api("io.opentelemetry:opentelemetry-sdk") 
implementation("io.opentelemetry.semconv:opentelemetry-semconv") @@ -26,12 +27,24 @@ dependencies { testImplementation("io.opentelemetry.semconv:opentelemetry-semconv-incubating") testImplementation("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure") + testImplementation("io.opentelemetry:opentelemetry-api-incubator") testImplementation("io.opentelemetry:opentelemetry-sdk-testing") + testImplementation("io.opentelemetry:opentelemetry-exporter-logging") + testImplementation("io.opentelemetry:opentelemetry-sdk-extension-incubator") -// testImplementation("org.mockito:mockito-core") testImplementation("com.google.guava:guava") testImplementation("org.junit.jupiter:junit-jupiter-api") testImplementation("org.assertj:assertj-core") testImplementation("com.linecorp.armeria:armeria-junit5") } + +tasks { + withType().configureEach { + environment( + "WEBSITE_SITE_NAME" to "my-function", + "FUNCTIONS_EXTENSION_VERSION" to "1.2.3" + ) + jvmArgs("-Dotel.experimental.config.file=${project.projectDir.resolve("src/test/resources/declarative-config.yaml")}") + } +} diff --git a/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/AzureAksResourceProvider.java b/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/AzureAksResourceProvider.java index 4d3b92280..987492dd8 100644 --- a/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/AzureAksResourceProvider.java +++ b/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/AzureAksResourceProvider.java @@ -8,14 +8,13 @@ import static io.opentelemetry.contrib.azure.resource.IncubatingAttributes.CloudPlatformIncubatingValues.AZURE_AKS; import static io.opentelemetry.contrib.azure.resource.IncubatingAttributes.K8S_CLUSTER_NAME; -import io.opentelemetry.sdk.autoconfigure.spi.ConfigProperties; import io.opentelemetry.sdk.resources.Resource; import java.util.HashMap; import java.util.Map; import java.util.Optional; import java.util.function.Supplier; -public 
class AzureAksResourceProvider extends CloudResourceProvider { +public final class AzureAksResourceProvider extends CloudResourceProvider { private static final Map COMPUTE_MAPPING = new HashMap<>(); @@ -56,12 +55,12 @@ public AzureAksResourceProvider() { @Override public int order() { // run after the fast cloud resource providers that only check environment variables - // and before the AKS provider + // and before the VM provider return 100; } @Override - public Resource createResource(ConfigProperties configProperties) { + public Resource createResource() { if (environment.get(KUBERNETES_SERVICE_HOST) == null) { return Resource.empty(); } diff --git a/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/AzureAppServiceResourceProvider.java b/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/AzureAppServiceResourceProvider.java index 71cf699e6..3a658428a 100644 --- a/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/AzureAppServiceResourceProvider.java +++ b/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/AzureAppServiceResourceProvider.java @@ -12,19 +12,18 @@ import static io.opentelemetry.contrib.azure.resource.IncubatingAttributes.HOST_ID; import static io.opentelemetry.contrib.azure.resource.IncubatingAttributes.SERVICE_INSTANCE_ID; import static io.opentelemetry.semconv.ServiceAttributes.SERVICE_NAME; +import static java.util.Objects.requireNonNull; import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.common.Attributes; import io.opentelemetry.api.common.AttributesBuilder; import io.opentelemetry.api.internal.StringUtils; -import io.opentelemetry.sdk.autoconfigure.spi.ConfigProperties; import io.opentelemetry.sdk.resources.Resource; import java.util.HashMap; import java.util.Map; -import java.util.Objects; import javax.annotation.Nullable; -public class AzureAppServiceResourceProvider extends CloudResourceProvider { +public final class 
AzureAppServiceResourceProvider extends CloudResourceProvider { static final AttributeKey AZURE_APP_SERVICE_STAMP_RESOURCE_ATTRIBUTE = AttributeKey.stringKey("azure.app.service.stamp"); @@ -60,7 +59,7 @@ public AzureAppServiceResourceProvider() { } @Override - public Resource createResource(ConfigProperties config) { + public Resource createResource() { return Resource.create(getAttributes()); } @@ -69,7 +68,7 @@ public Attributes getAttributes() { if (detect != AzureEnvVarPlatform.APP_SERVICE) { return Attributes.empty(); } - String name = Objects.requireNonNull(env.get(WEBSITE_SITE_NAME)); + String name = requireNonNull(env.get(WEBSITE_SITE_NAME)); AttributesBuilder builder = AzureVmResourceProvider.azureAttributeBuilder(AZURE_APP_SERVICE); builder.put(SERVICE_NAME, name); diff --git a/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/AzureContainersResourceProvider.java b/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/AzureContainersResourceProvider.java index 2f641148f..014ec5b41 100644 --- a/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/AzureContainersResourceProvider.java +++ b/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/AzureContainersResourceProvider.java @@ -12,12 +12,11 @@ import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.common.Attributes; import io.opentelemetry.api.common.AttributesBuilder; -import io.opentelemetry.sdk.autoconfigure.spi.ConfigProperties; import io.opentelemetry.sdk.resources.Resource; import java.util.HashMap; import java.util.Map; -public class AzureContainersResourceProvider extends CloudResourceProvider { +public final class AzureContainersResourceProvider extends CloudResourceProvider { static final String CONTAINER_APP_NAME = "CONTAINER_APP_NAME"; @@ -45,7 +44,7 @@ public AzureContainersResourceProvider() { } @Override - public Resource createResource(ConfigProperties config) { + public Resource createResource() 
{ return Resource.create(getAttributes()); } diff --git a/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/AzureFunctionsResourceProvider.java b/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/AzureFunctionsResourceProvider.java index 1b86c6212..d98a41be4 100644 --- a/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/AzureFunctionsResourceProvider.java +++ b/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/AzureFunctionsResourceProvider.java @@ -15,12 +15,11 @@ import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.common.Attributes; import io.opentelemetry.api.common.AttributesBuilder; -import io.opentelemetry.sdk.autoconfigure.spi.ConfigProperties; import io.opentelemetry.sdk.resources.Resource; import java.util.HashMap; import java.util.Map; -public class AzureFunctionsResourceProvider extends CloudResourceProvider { +public final class AzureFunctionsResourceProvider extends CloudResourceProvider { static final String FUNCTIONS_VERSION = "FUNCTIONS_EXTENSION_VERSION"; private static final String FUNCTIONS_MEM_LIMIT = "WEBSITE_MEMORY_LIMIT_MB"; @@ -47,7 +46,7 @@ public AzureFunctionsResourceProvider() { } @Override - public Resource createResource(ConfigProperties config) { + public Resource createResource() { return Resource.create(getAttributes()); } diff --git a/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/AzureMetadataService.java b/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/AzureMetadataService.java index d5bf44520..a93413a24 100644 --- a/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/AzureMetadataService.java +++ b/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/AzureMetadataService.java @@ -5,12 +5,13 @@ package io.opentelemetry.contrib.azure.resource; +import static java.util.Objects.requireNonNull; + import com.fasterxml.jackson.core.JsonFactory; import 
java.io.IOException; import java.net.MalformedURLException; import java.net.URL; import java.time.Duration; -import java.util.Objects; import java.util.Optional; import java.util.function.Supplier; import java.util.logging.Level; @@ -19,9 +20,11 @@ import okhttp3.Request; import okhttp3.Response; -public class AzureMetadataService { +public final class AzureMetadataService { static final JsonFactory JSON_FACTORY = new JsonFactory(); private static final URL METADATA_URL; + private static final Duration TIMEOUT = Duration.ofSeconds(5); + private static final Logger logger = Logger.getLogger(AzureMetadataService.class.getName()); static { try { @@ -31,12 +34,6 @@ public class AzureMetadataService { } } - private AzureMetadataService() {} - - private static final Duration TIMEOUT = Duration.ofSeconds(5); - - private static final Logger logger = Logger.getLogger(AzureMetadataService.class.getName()); - static Supplier> defaultClient() { return () -> fetchMetadata(METADATA_URL); } @@ -66,10 +63,12 @@ static Optional fetchMetadata(URL url) { return Optional.empty(); } - return Optional.of(Objects.requireNonNull(response.body()).string()); + return Optional.of(requireNonNull(response.body()).string()); } catch (IOException e) { logger.log(Level.FINE, "Failed to fetch Azure VM metadata", e); return Optional.empty(); } } + + private AzureMetadataService() {} } diff --git a/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/AzureResourceDetector.java b/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/AzureResourceDetector.java new file mode 100644 index 000000000..27da91c4c --- /dev/null +++ b/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/AzureResourceDetector.java @@ -0,0 +1,53 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.azure.resource; + +import io.opentelemetry.api.common.Attributes; +import 
io.opentelemetry.api.incubator.config.DeclarativeConfigProperties; +import io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider; +import io.opentelemetry.sdk.resources.Resource; +import io.opentelemetry.sdk.resources.ResourceBuilder; + +public final class AzureResourceDetector implements ComponentProvider { + + @Override + public Class getType() { + return Resource.class; + } + + @Override + public String getName() { + return "azure"; + } + + @Override + public Resource create(DeclarativeConfigProperties config) { + Builder builder = new Builder(); + builder.add(new AzureFunctionsResourceProvider()); + builder.add(new AzureAppServiceResourceProvider()); + builder.add(new AzureContainersResourceProvider()); + builder.addIfEmpty(new AzureAksResourceProvider()); + builder.addIfEmpty(new AzureVmResourceProvider()); + return builder.builder.build(); + } + + private static class Builder { + final ResourceBuilder builder = Resource.builder(); + int attributesCount = 0; + + private void add(CloudResourceProvider provider) { + Attributes attributes = provider.createResource().getAttributes(); + builder.putAll(attributes); + attributesCount += attributes.size(); + } + + private void addIfEmpty(CloudResourceProvider provider) { + if (attributesCount == 0) { + add(provider); + } + } + } +} diff --git a/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/AzureVmResourceProvider.java b/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/AzureVmResourceProvider.java index 139b808d4..2a87a0488 100644 --- a/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/AzureVmResourceProvider.java +++ b/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/AzureVmResourceProvider.java @@ -22,7 +22,6 @@ import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.common.Attributes; import io.opentelemetry.api.common.AttributesBuilder; -import 
io.opentelemetry.sdk.autoconfigure.spi.ConfigProperties; import io.opentelemetry.sdk.resources.Resource; import java.io.IOException; import java.util.HashMap; @@ -35,21 +34,7 @@ import java.util.logging.Logger; import org.jetbrains.annotations.NotNull; -public class AzureVmResourceProvider extends CloudResourceProvider { - - static class Entry { - final AttributeKey key; - final Function transform; - - Entry(AttributeKey key) { - this(key, Function.identity()); - } - - Entry(AttributeKey key, Function transform) { - this.key = key; - this.transform = transform; - } - } +public final class AzureVmResourceProvider extends CloudResourceProvider { private static final Map COMPUTE_MAPPING = new HashMap<>(); @@ -88,7 +73,7 @@ public int order() { } @Override - public Resource createResource(ConfigProperties config) { + public Resource createResource() { return client .get() .map(body -> parseMetadata(body, COMPUTE_MAPPING, AZURE_VM)) @@ -162,4 +147,18 @@ private static void consumeJson(JsonParser parser, BiConsumer co consumer.accept(parser.currentName(), parser.nextTextValue()); } } + + static class Entry { + final AttributeKey key; + final Function transform; + + Entry(AttributeKey key) { + this(key, Function.identity()); + } + + Entry(AttributeKey key, Function transform) { + this.key = key; + this.transform = transform; + } + } } diff --git a/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/CloudResourceProvider.java b/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/CloudResourceProvider.java index 181a22889..3c7fcc862 100644 --- a/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/CloudResourceProvider.java +++ b/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/CloudResourceProvider.java @@ -17,4 +17,12 @@ public abstract class CloudResourceProvider implements ConditionalResourceProvid public final boolean shouldApply(ConfigProperties config, Resource existing) { return 
existing.getAttribute(CLOUD_PROVIDER) == null; } + + @Override + public final Resource createResource(ConfigProperties config) { + // not using config in any providers + return createResource(); + } + + abstract Resource createResource(); } diff --git a/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/IncubatingAttributes.java b/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/IncubatingAttributes.java index 491cb99a8..524ca8727 100644 --- a/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/IncubatingAttributes.java +++ b/azure-resources/src/main/java/io/opentelemetry/contrib/azure/resource/IncubatingAttributes.java @@ -24,10 +24,10 @@ final class IncubatingAttributes { AttributeKey.stringKey("cloud.resource_id"); public static final class CloudPlatformIncubatingValues { - public static final String AZURE_VM = "azure_vm"; - public static final String AZURE_AKS = "azure_aks"; - public static final String AZURE_FUNCTIONS = "azure_functions"; - public static final String AZURE_APP_SERVICE = "azure_app_service"; + public static final String AZURE_VM = "azure.vm"; + public static final String AZURE_AKS = "azure.aks"; + public static final String AZURE_FUNCTIONS = "azure.functions"; + public static final String AZURE_APP_SERVICE = "azure.app_service"; private CloudPlatformIncubatingValues() {} } diff --git a/azure-resources/src/main/resources/META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider b/azure-resources/src/main/resources/META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider new file mode 100644 index 000000000..373780ff0 --- /dev/null +++ b/azure-resources/src/main/resources/META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider @@ -0,0 +1 @@ +io.opentelemetry.contrib.azure.resource.AzureResourceDetector diff --git 
a/azure-resources/src/test/java/io/opentelemetry/contrib/azure/resource/AzureAppServiceResourceProviderTest.java b/azure-resources/src/test/java/io/opentelemetry/contrib/azure/resource/AzureAppServiceResourceProviderTest.java index 20d856cba..90a7d27b5 100644 --- a/azure-resources/src/test/java/io/opentelemetry/contrib/azure/resource/AzureAppServiceResourceProviderTest.java +++ b/azure-resources/src/test/java/io/opentelemetry/contrib/azure/resource/AzureAppServiceResourceProviderTest.java @@ -5,6 +5,7 @@ package io.opentelemetry.contrib.azure.resource; +import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.assertThat; import static io.opentelemetry.semconv.ServiceAttributes.SERVICE_NAME; import static io.opentelemetry.semconv.incubating.CloudIncubatingAttributes.CLOUD_PLATFORM; import static io.opentelemetry.semconv.incubating.CloudIncubatingAttributes.CLOUD_PROVIDER; @@ -16,7 +17,6 @@ import com.google.common.collect.ImmutableMap; import io.opentelemetry.sdk.testing.assertj.AttributesAssert; -import io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions; import java.util.HashMap; import java.util.Map; import org.jetbrains.annotations.NotNull; @@ -48,7 +48,7 @@ void defaultValues() { createResource(DEFAULT_ENV_VARS) .containsEntry(SERVICE_NAME, TEST_WEBSITE_SITE_NAME) .containsEntry(CLOUD_PROVIDER, "azure") - .containsEntry(CLOUD_PLATFORM, "azure_app_service") + .containsEntry(CLOUD_PLATFORM, "azure.app_service") .containsEntry( CLOUD_RESOURCE_ID, "/subscriptions/TEST_WEBSITE_OWNER_NAME/resourceGroups/TEST_WEBSITE_RESOURCE_GROUP/providers/Microsoft.Web/sites/TEST_WEBSITE_SITE_NAME") @@ -98,7 +98,7 @@ void isFunction() { @NotNull private static AttributesAssert createResource(Map map) { - return OpenTelemetryAssertions.assertThat( + return assertThat( new AzureAppServiceResourceProvider(map).createResource(null).getAttributes()); } } diff --git 
a/azure-resources/src/test/java/io/opentelemetry/contrib/azure/resource/AzureContainersResourceProviderTest.java b/azure-resources/src/test/java/io/opentelemetry/contrib/azure/resource/AzureContainersResourceProviderTest.java index 5ac1a4be7..082ceaf6a 100644 --- a/azure-resources/src/test/java/io/opentelemetry/contrib/azure/resource/AzureContainersResourceProviderTest.java +++ b/azure-resources/src/test/java/io/opentelemetry/contrib/azure/resource/AzureContainersResourceProviderTest.java @@ -5,6 +5,7 @@ package io.opentelemetry.contrib.azure.resource; +import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.assertThat; import static io.opentelemetry.semconv.ServiceAttributes.SERVICE_NAME; import static io.opentelemetry.semconv.ServiceAttributes.SERVICE_VERSION; import static io.opentelemetry.semconv.incubating.CloudIncubatingAttributes.CLOUD_PLATFORM; @@ -13,7 +14,6 @@ import com.google.common.collect.ImmutableMap; import io.opentelemetry.sdk.testing.assertj.AttributesAssert; -import io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions; import java.util.HashMap; import java.util.Map; import org.jetbrains.annotations.NotNull; @@ -50,7 +50,7 @@ void isNotContainer() { @NotNull private static AttributesAssert createResource(Map map) { - return OpenTelemetryAssertions.assertThat( + return assertThat( new AzureContainersResourceProvider(map).createResource(null).getAttributes()); } } diff --git a/azure-resources/src/test/java/io/opentelemetry/contrib/azure/resource/AzureFunctionsResourceProviderTest.java b/azure-resources/src/test/java/io/opentelemetry/contrib/azure/resource/AzureFunctionsResourceProviderTest.java index 520e44543..b680088e8 100644 --- a/azure-resources/src/test/java/io/opentelemetry/contrib/azure/resource/AzureFunctionsResourceProviderTest.java +++ b/azure-resources/src/test/java/io/opentelemetry/contrib/azure/resource/AzureFunctionsResourceProviderTest.java @@ -5,6 +5,7 @@ package io.opentelemetry.contrib.azure.resource; 
+import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.assertThat; import static io.opentelemetry.semconv.incubating.CloudIncubatingAttributes.CLOUD_PLATFORM; import static io.opentelemetry.semconv.incubating.CloudIncubatingAttributes.CLOUD_PROVIDER; import static io.opentelemetry.semconv.incubating.FaasIncubatingAttributes.FAAS_INSTANCE; @@ -14,7 +15,6 @@ import com.google.common.collect.ImmutableMap; import io.opentelemetry.sdk.testing.assertj.AttributesAssert; -import io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions; import java.util.HashMap; import java.util.Map; import org.jetbrains.annotations.NotNull; @@ -38,7 +38,7 @@ class AzureFunctionsResourceProviderTest { void defaultValues() { createResource(DEFAULT_ENV_VARS) .containsEntry(CLOUD_PROVIDER, "azure") - .containsEntry(CLOUD_PLATFORM, "azure_functions") + .containsEntry(CLOUD_PLATFORM, "azure.functions") .containsEntry(FAAS_NAME, TEST_WEBSITE_SITE_NAME) .containsEntry(FAAS_VERSION, TEST_FUNCTION_VERSION) .containsEntry(FAAS_INSTANCE, TEST_WEBSITE_INSTANCE_ID) @@ -55,7 +55,6 @@ void isNotFunction() { @NotNull private static AttributesAssert createResource(Map map) { - return OpenTelemetryAssertions.assertThat( - new AzureFunctionsResourceProvider(map).createResource(null).getAttributes()); + return assertThat(new AzureFunctionsResourceProvider(map).createResource(null).getAttributes()); } } diff --git a/azure-resources/src/test/java/io/opentelemetry/contrib/azure/resource/MetadataBasedResourceProviderTest.java b/azure-resources/src/test/java/io/opentelemetry/contrib/azure/resource/MetadataBasedResourceProviderTest.java index d827e8fd4..3d65c1e0a 100644 --- a/azure-resources/src/test/java/io/opentelemetry/contrib/azure/resource/MetadataBasedResourceProviderTest.java +++ b/azure-resources/src/test/java/io/opentelemetry/contrib/azure/resource/MetadataBasedResourceProviderTest.java @@ -5,6 +5,7 @@ package io.opentelemetry.contrib.azure.resource; +import static 
io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.assertThat; import static io.opentelemetry.semconv.incubating.CloudIncubatingAttributes.CLOUD_PLATFORM; import static io.opentelemetry.semconv.incubating.CloudIncubatingAttributes.CLOUD_PROVIDER; @@ -16,7 +17,6 @@ import io.opentelemetry.sdk.autoconfigure.spi.ResourceProvider; import io.opentelemetry.sdk.resources.Resource; import io.opentelemetry.sdk.testing.assertj.AttributesAssert; -import io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions; import java.io.IOException; import java.io.InputStreamReader; import java.net.MalformedURLException; @@ -28,7 +28,7 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.RegisterExtension; -public abstract class MetadataBasedResourceProviderTest { +abstract class MetadataBasedResourceProviderTest { @RegisterExtension public static final MockWebServerExtension server = new MockWebServerExtension(); @@ -47,7 +47,7 @@ private AttributesAssert mockServerResponse() { @NotNull private AttributesAssert createResource(Supplier> client) { Resource resource = getResourceProvider(client).createResource(null); - return OpenTelemetryAssertions.assertThat(resource.getAttributes()); + return assertThat(resource.getAttributes()); } @NotNull @@ -79,30 +79,30 @@ protected static String okResponse() { } @Test - public void successFromFile() { + void successFromFile() { assertDefaultAttributes(createResource(() -> Optional.of(okResponse()))); } @Test - public void successFromMockServer() { + void successFromMockServer() { server.enqueue(HttpResponse.of(MediaType.JSON, okResponse())); assertDefaultAttributes(mockServerResponse()); } @Test - public void responseNotFound() { + void responseNotFound() { server.enqueue(HttpResponse.of(HttpStatus.NOT_FOUND)); mockServerResponse().isEmpty(); } @Test - public void responseEmpty() { + void responseEmpty() { server.enqueue(HttpResponse.of("")); assertOnlyProvider(mockServerResponse()); } @Test - public void 
responseEmptyJson() { + void responseEmptyJson() { server.enqueue(HttpResponse.of("{}")); assertOnlyProvider(mockServerResponse()); } diff --git a/azure-resources/src/test/java/io/opentelemetry/contrib/azure/resource/ResourceComponentProviderTest.java b/azure-resources/src/test/java/io/opentelemetry/contrib/azure/resource/ResourceComponentProviderTest.java new file mode 100644 index 000000000..0f67f3919 --- /dev/null +++ b/azure-resources/src/test/java/io/opentelemetry/contrib/azure/resource/ResourceComponentProviderTest.java @@ -0,0 +1,35 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.azure.resource; + +import static org.assertj.core.api.Assertions.assertThat; + +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.sdk.autoconfigure.AutoConfiguredOpenTelemetrySdk; +import io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions; +import io.opentelemetry.semconv.incubating.CloudIncubatingAttributes; +import org.assertj.core.api.InstanceOfAssertFactory; +import org.junit.jupiter.api.Test; + +class ResourceComponentProviderTest { + + @Test + void endToEnd() { + assertThat( + AutoConfiguredOpenTelemetrySdk.builder() + .build() + .getOpenTelemetrySdk() + .getSdkTracerProvider()) + .extracting("sharedState") + .extracting("resource") + .extracting( + "attributes", + new InstanceOfAssertFactory<>(Attributes.class, OpenTelemetryAssertions::assertThat)) + .containsEntry( + CloudIncubatingAttributes.CLOUD_PROVIDER, + CloudIncubatingAttributes.CloudProviderIncubatingValues.AZURE); + } +} diff --git a/azure-resources/src/test/resources/declarative-config.yaml b/azure-resources/src/test/resources/declarative-config.yaml new file mode 100644 index 000000000..748dbddd9 --- /dev/null +++ b/azure-resources/src/test/resources/declarative-config.yaml @@ -0,0 +1,10 @@ +file_format: "1.0-rc.1" +resource: + detection/development: + detectors: + - azure: +tracer_provider: + 
processors: + - simple: + exporter: + console: diff --git a/baggage-processor/README.md b/baggage-processor/README.md index 44719770b..10e98910d 100644 --- a/baggage-processor/README.md +++ b/baggage-processor/README.md @@ -25,6 +25,36 @@ processors through configuration. | `otel.java.experimental.span-attributes.copy-from-baggage.include` | Add baggage entries as span attributes, e.g. `key1,key2` or `*` to add all baggage items as keys. | | `otel.java.experimental.log-attributes.copy-from-baggage.include` | Add baggage entries as log attributes, e.g. `key1,key2` or `*` to add all baggage items as keys. | +### Usage with declarative configuration + +You can configure the baggage span and log record processors using declarative YAML configuration with the OpenTelemetry SDK. + +For the tracer provider (span processor): + +```yaml +file_format: 1.0-rc.1 +tracer_provider: + processors: + - baggage: + included: [foo] + excluded: [bar] +``` + +For the logger provider (log record processor): + +```yaml +file_format: 1.0-rc.1 +logger_provider: + processors: + - baggage: + included: [foo] + excluded: [bar] +``` + +This will configure the respective processor to include baggage keys listed in `included` and +exclude those in `excluded` as explained in +[Properties which pattern matching](https://github.com/open-telemetry/opentelemetry-configuration/blob/main/CONTRIBUTING.md#properties-which-pattern-matching). + ### Usage through programmatic activation Add the span and log processor when configuring the tracer and logger providers. 
diff --git a/baggage-processor/build.gradle.kts b/baggage-processor/build.gradle.kts index 017158399..0ff9dd52a 100644 --- a/baggage-processor/build.gradle.kts +++ b/baggage-processor/build.gradle.kts @@ -8,11 +8,19 @@ description = "OpenTelemetry Baggage Span Processor" otelJava.moduleName.set("io.opentelemetry.contrib.baggage.processor") dependencies { + annotationProcessor("com.google.auto.service:auto-service") + compileOnly("com.google.auto.service:auto-service-annotations") api("io.opentelemetry:opentelemetry-api") api("io.opentelemetry:opentelemetry-sdk") implementation("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure-spi") + compileOnly("io.opentelemetry:opentelemetry-sdk-common") + compileOnly("io.opentelemetry:opentelemetry-sdk-extension-incubator") + testAnnotationProcessor("com.google.auto.service:auto-service") + testCompileOnly("com.google.auto.service:auto-service-annotations") + testImplementation("io.opentelemetry:opentelemetry-sdk-common") testImplementation("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure") + testImplementation("io.opentelemetry:opentelemetry-sdk-extension-incubator") testImplementation("io.opentelemetry:opentelemetry-sdk-testing") testImplementation("org.mockito:mockito-inline") testImplementation("com.google.guava:guava") diff --git a/baggage-processor/src/main/java/io/opentelemetry/contrib/baggage/processor/BaggageLogRecordComponentProvider.java b/baggage-processor/src/main/java/io/opentelemetry/contrib/baggage/processor/BaggageLogRecordComponentProvider.java new file mode 100644 index 000000000..be40ab97c --- /dev/null +++ b/baggage-processor/src/main/java/io/opentelemetry/contrib/baggage/processor/BaggageLogRecordComponentProvider.java @@ -0,0 +1,34 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.baggage.processor; + +import com.google.auto.service.AutoService; +import 
io.opentelemetry.api.incubator.config.DeclarativeConfigProperties; +import io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider; +import io.opentelemetry.sdk.internal.IncludeExcludePredicate; +import io.opentelemetry.sdk.logs.LogRecordProcessor; + +@SuppressWarnings("rawtypes") +@AutoService(ComponentProvider.class) +public class BaggageLogRecordComponentProvider implements ComponentProvider { + @Override + public String getName() { + return "baggage"; + } + + @Override + public LogRecordProcessor create(DeclarativeConfigProperties config) { + return new BaggageLogRecordProcessor( + IncludeExcludePredicate.createPatternMatching( + config.getScalarList("included", String.class), + config.getScalarList("excluded", String.class))); + } + + @Override + public Class getType() { + return LogRecordProcessor.class; + } +} diff --git a/baggage-processor/src/main/java/io/opentelemetry/contrib/baggage/processor/BaggageLogRecordProcessor.java b/baggage-processor/src/main/java/io/opentelemetry/contrib/baggage/processor/BaggageLogRecordProcessor.java index 4e8c91505..474f4caef 100644 --- a/baggage-processor/src/main/java/io/opentelemetry/contrib/baggage/processor/BaggageLogRecordProcessor.java +++ b/baggage-processor/src/main/java/io/opentelemetry/contrib/baggage/processor/BaggageLogRecordProcessor.java @@ -16,15 +16,7 @@ * This log record processor copies attributes stored in {@link Baggage} into each newly created log * record. */ -public class BaggageLogRecordProcessor implements LogRecordProcessor { - - /** - * Creates a new {@link BaggageLogRecordProcessor} that copies all baggage entries into the newly - * created log record. 
- */ - public static BaggageLogRecordProcessor allowAllBaggageKeys() { - return new BaggageLogRecordProcessor(baggageKey -> true); - } +public final class BaggageLogRecordProcessor implements LogRecordProcessor { private final Predicate baggageKeyPredicate; @@ -36,6 +28,14 @@ public BaggageLogRecordProcessor(Predicate baggageKeyPredicate) { this.baggageKeyPredicate = baggageKeyPredicate; } + /** + * Creates a new {@link BaggageLogRecordProcessor} that copies all baggage entries into the newly + * created log record. + */ + public static BaggageLogRecordProcessor allowAllBaggageKeys() { + return new BaggageLogRecordProcessor(baggageKey -> true); + } + @Override public void onEmit(Context context, ReadWriteLogRecord logRecord) { Baggage.fromContext(context) diff --git a/baggage-processor/src/main/java/io/opentelemetry/contrib/baggage/processor/BaggageProcessorCustomizer.java b/baggage-processor/src/main/java/io/opentelemetry/contrib/baggage/processor/BaggageProcessorCustomizer.java index da35512a3..2e07722e6 100644 --- a/baggage-processor/src/main/java/io/opentelemetry/contrib/baggage/processor/BaggageProcessorCustomizer.java +++ b/baggage-processor/src/main/java/io/opentelemetry/contrib/baggage/processor/BaggageProcessorCustomizer.java @@ -5,6 +5,7 @@ package io.opentelemetry.contrib.baggage.processor; +import com.google.auto.service.AutoService; import io.opentelemetry.sdk.autoconfigure.spi.AutoConfigurationCustomizer; import io.opentelemetry.sdk.autoconfigure.spi.AutoConfigurationCustomizerProvider; import io.opentelemetry.sdk.autoconfigure.spi.ConfigProperties; @@ -12,7 +13,8 @@ import io.opentelemetry.sdk.trace.SdkTracerProviderBuilder; import java.util.List; -public class BaggageProcessorCustomizer implements AutoConfigurationCustomizerProvider { +@AutoService(AutoConfigurationCustomizerProvider.class) +public final class BaggageProcessorCustomizer implements AutoConfigurationCustomizerProvider { @Override public void customize(AutoConfigurationCustomizer 
autoConfigurationCustomizer) { autoConfigurationCustomizer @@ -37,7 +39,8 @@ private static void addSpanProcessor( return; } - sdkTracerProviderBuilder.addSpanProcessor(createBaggageSpanProcessor(keys)); + // need to add before the batch span processor + sdkTracerProviderBuilder.addSpanProcessorFirst(createBaggageSpanProcessor(keys)); } static BaggageSpanProcessor createBaggageSpanProcessor(List keys) { @@ -56,7 +59,8 @@ private static void addLogRecordProcessor( return; } - sdkLoggerProviderBuilder.addLogRecordProcessor(createBaggageLogRecordProcessor(keys)); + // need to add before the batch log processor + sdkLoggerProviderBuilder.addLogRecordProcessorFirst(createBaggageLogRecordProcessor(keys)); } static BaggageLogRecordProcessor createBaggageLogRecordProcessor(List keys) { diff --git a/baggage-processor/src/main/java/io/opentelemetry/contrib/baggage/processor/BaggageSpanComponentProvider.java b/baggage-processor/src/main/java/io/opentelemetry/contrib/baggage/processor/BaggageSpanComponentProvider.java new file mode 100644 index 000000000..19acb6ba7 --- /dev/null +++ b/baggage-processor/src/main/java/io/opentelemetry/contrib/baggage/processor/BaggageSpanComponentProvider.java @@ -0,0 +1,34 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.baggage.processor; + +import com.google.auto.service.AutoService; +import io.opentelemetry.api.incubator.config.DeclarativeConfigProperties; +import io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider; +import io.opentelemetry.sdk.internal.IncludeExcludePredicate; +import io.opentelemetry.sdk.trace.SpanProcessor; + +@SuppressWarnings("rawtypes") +@AutoService(ComponentProvider.class) +public class BaggageSpanComponentProvider implements ComponentProvider { + @Override + public String getName() { + return "baggage"; + } + + @Override + public SpanProcessor create(DeclarativeConfigProperties config) { + return new BaggageSpanProcessor( + 
IncludeExcludePredicate.createPatternMatching( + config.getScalarList("included", String.class), + config.getScalarList("excluded", String.class))); + } + + @Override + public Class getType() { + return SpanProcessor.class; + } +} diff --git a/baggage-processor/src/main/java/io/opentelemetry/contrib/baggage/processor/BaggageSpanProcessor.java b/baggage-processor/src/main/java/io/opentelemetry/contrib/baggage/processor/BaggageSpanProcessor.java index 5f0f53d03..1ba62b19d 100644 --- a/baggage-processor/src/main/java/io/opentelemetry/contrib/baggage/processor/BaggageSpanProcessor.java +++ b/baggage-processor/src/main/java/io/opentelemetry/contrib/baggage/processor/BaggageSpanProcessor.java @@ -16,7 +16,7 @@ * This span processor copies attributes stored in {@link Baggage} into each newly created {@link * io.opentelemetry.api.trace.Span}. */ -public class BaggageSpanProcessor implements SpanProcessor { +public final class BaggageSpanProcessor implements SpanProcessor { private final Predicate baggageKeyPredicate; /** diff --git a/baggage-processor/src/main/resources/META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.AutoConfigurationCustomizerProvider b/baggage-processor/src/main/resources/META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.AutoConfigurationCustomizerProvider deleted file mode 100644 index 8eb4afb06..000000000 --- a/baggage-processor/src/main/resources/META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.AutoConfigurationCustomizerProvider +++ /dev/null @@ -1 +0,0 @@ -io.opentelemetry.contrib.baggage.processor.BaggageProcessorCustomizer diff --git a/baggage-processor/src/test/java/io/opentelemetry/contrib/baggage/processor/BaggageLogRecordComponentProviderTest.java b/baggage-processor/src/test/java/io/opentelemetry/contrib/baggage/processor/BaggageLogRecordComponentProviderTest.java new file mode 100644 index 000000000..1c8bd28bb --- /dev/null +++ 
b/baggage-processor/src/test/java/io/opentelemetry/contrib/baggage/processor/BaggageLogRecordComponentProviderTest.java @@ -0,0 +1,34 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.baggage.processor; + +import static org.assertj.core.api.Assertions.assertThat; + +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.extension.incubator.fileconfig.DeclarativeConfiguration; +import java.io.ByteArrayInputStream; +import java.nio.charset.StandardCharsets; +import org.junit.jupiter.api.Test; + +class BaggageLogRecordComponentProviderTest { + + @Test + void declarativeConfig() { + String yaml = + "file_format: 1.0-rc.1\n" + + "logger_provider:\n" + + " processors:\n" + + " - baggage:\n" + + " included: [foo]\n" + + " excluded: [bar]\n"; + + OpenTelemetrySdk sdk = + DeclarativeConfiguration.parseAndCreate( + new ByteArrayInputStream(yaml.getBytes(StandardCharsets.UTF_8))); + + assertThat(sdk).asString().contains("BaggageLogRecordProcessor"); + } +} diff --git a/baggage-processor/src/test/java/io/opentelemetry/contrib/baggage/processor/BaggageProcessorCustomizerTest.java b/baggage-processor/src/test/java/io/opentelemetry/contrib/baggage/processor/BaggageProcessorCustomizerTest.java index 9dcb9a4a7..645ff5334 100644 --- a/baggage-processor/src/test/java/io/opentelemetry/contrib/baggage/processor/BaggageProcessorCustomizerTest.java +++ b/baggage-processor/src/test/java/io/opentelemetry/contrib/baggage/processor/BaggageProcessorCustomizerTest.java @@ -12,13 +12,12 @@ import com.google.common.collect.ImmutableMap; import io.opentelemetry.api.baggage.Baggage; import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.common.ComponentLoader; import io.opentelemetry.context.Context; import io.opentelemetry.context.Scope; import io.opentelemetry.sdk.OpenTelemetrySdk; import io.opentelemetry.sdk.autoconfigure.AutoConfiguredOpenTelemetrySdk; import 
io.opentelemetry.sdk.autoconfigure.AutoConfiguredOpenTelemetrySdkBuilder; -import io.opentelemetry.sdk.autoconfigure.internal.AutoConfigureUtil; -import io.opentelemetry.sdk.autoconfigure.internal.ComponentLoader; import io.opentelemetry.sdk.autoconfigure.internal.SpiHelper; import io.opentelemetry.sdk.autoconfigure.spi.ConfigProperties; import io.opentelemetry.sdk.autoconfigure.spi.logs.ConfigurableLogRecordExporterProvider; @@ -49,11 +48,15 @@ class BaggageProcessorCustomizerTest { private static final String MEMORY_EXPORTER = "memory"; @Test - void test_customizer() { + void test_empty_customizer() { assertCustomizer( Collections.emptyMap(), span -> assertThat(span).hasTotalAttributeCount(0), logRecord -> assertThat(logRecord).hasTotalAttributeCount(0)); + } + + @Test + void test_customizer() { Map properties = new HashMap<>(); properties.put("otel.java.experimental.span-attributes.copy-from-baggage.include", "key"); properties.put("otel.java.experimental.log-attributes.copy-from-baggage.include", "key"); @@ -113,50 +116,50 @@ private static OpenTelemetrySdk getOpenTelemetrySdk( "none", "otel.logs.exporter", MEMORY_EXPORTER)) - .addPropertiesSupplier(() -> properties); - AutoConfigureUtil.setComponentLoader( - sdkBuilder, - new ComponentLoader() { - @SuppressWarnings("unchecked") - @Override - public List load(Class spiClass) { - if (spiClass == ConfigurableSpanExporterProvider.class) { - return Collections.singletonList( - (T) - new ConfigurableSpanExporterProvider() { - @Override - public SpanExporter createExporter(ConfigProperties configProperties) { - return spanExporter; - } - - @Override - public String getName() { - return MEMORY_EXPORTER; - } - }); - } else if (spiClass == ConfigurableLogRecordExporterProvider.class) { - return Collections.singletonList( - (T) - new ConfigurableLogRecordExporterProvider() { - @Override - public LogRecordExporter createExporter(ConfigProperties configProperties) { - return logRecordExporter; - } - - @Override - public 
String getName() { - return MEMORY_EXPORTER; - } - }); - } - return spiHelper.load(spiClass); - } - }); + .addPropertiesSupplier(() -> properties) + .setComponentLoader( + new ComponentLoader() { + @Override + public List load(Class spiClass) { + if (spiClass.equals(ConfigurableSpanExporterProvider.class)) { + return Collections.singletonList( + spiClass.cast( + new ConfigurableSpanExporterProvider() { + @Override + public SpanExporter createExporter( + ConfigProperties configProperties) { + return spanExporter; + } + + @Override + public String getName() { + return MEMORY_EXPORTER; + } + })); + } else if (spiClass.equals(ConfigurableLogRecordExporterProvider.class)) { + return Collections.singletonList( + spiClass.cast( + new ConfigurableLogRecordExporterProvider() { + @Override + public LogRecordExporter createExporter( + ConfigProperties configProperties) { + return logRecordExporter; + } + + @Override + public String getName() { + return MEMORY_EXPORTER; + } + })); + } + return spiHelper.load(spiClass); + } + }); return sdkBuilder.build().getOpenTelemetrySdk(); } @Test - public void test_baggageSpanProcessor_adds_attributes_to_spans(@Mock ReadWriteSpan span) { + void test_baggageSpanProcessor_adds_attributes_to_spans(@Mock ReadWriteSpan span) { try (BaggageSpanProcessor processor = BaggageProcessorCustomizer.createBaggageSpanProcessor(Collections.singletonList("*"))) { try (Scope ignore = Baggage.current().toBuilder().put("key", "value").build().makeCurrent()) { @@ -167,7 +170,7 @@ public void test_baggageSpanProcessor_adds_attributes_to_spans(@Mock ReadWriteSp } @Test - public void test_baggageSpanProcessor_adds_attributes_to_spans_when_key_filter_matches( + void test_baggageSpanProcessor_adds_attributes_to_spans_when_key_filter_matches( @Mock ReadWriteSpan span) { try (BaggageSpanProcessor processor = BaggageProcessorCustomizer.createBaggageSpanProcessor(Collections.singletonList("key"))) { @@ -185,7 +188,7 @@ public void 
test_baggageSpanProcessor_adds_attributes_to_spans_when_key_filter_m } @Test - public void test_baggageLogRecordProcessor_adds_attributes_to_logRecord( + void test_baggageLogRecordProcessor_adds_attributes_to_logRecord( @Mock ReadWriteLogRecord logRecord) { try (BaggageLogRecordProcessor processor = BaggageProcessorCustomizer.createBaggageLogRecordProcessor( @@ -198,7 +201,7 @@ public void test_baggageLogRecordProcessor_adds_attributes_to_logRecord( } @Test - public void test_baggageLogRecordProcessor_adds_attributes_to_spans_when_key_filter_matches( + void test_baggageLogRecordProcessor_adds_attributes_to_spans_when_key_filter_matches( @Mock ReadWriteLogRecord logRecord) { try (BaggageLogRecordProcessor processor = BaggageProcessorCustomizer.createBaggageLogRecordProcessor( diff --git a/baggage-processor/src/test/java/io/opentelemetry/contrib/baggage/processor/BaggageSpanComponentProviderTest.java b/baggage-processor/src/test/java/io/opentelemetry/contrib/baggage/processor/BaggageSpanComponentProviderTest.java new file mode 100644 index 000000000..77399305e --- /dev/null +++ b/baggage-processor/src/test/java/io/opentelemetry/contrib/baggage/processor/BaggageSpanComponentProviderTest.java @@ -0,0 +1,34 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.baggage.processor; + +import static org.assertj.core.api.Assertions.assertThat; + +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.extension.incubator.fileconfig.DeclarativeConfiguration; +import java.io.ByteArrayInputStream; +import java.nio.charset.StandardCharsets; +import org.junit.jupiter.api.Test; + +class BaggageSpanComponentProviderTest { + + @Test + void declarativeConfig() { + String yaml = + "file_format: 1.0-rc.1\n" + + "tracer_provider:\n" + + " processors:\n" + + " - baggage:\n" + + " included: [foo]\n" + + " excluded: [bar]\n"; + + OpenTelemetrySdk sdk = + DeclarativeConfiguration.parseAndCreate( + 
new ByteArrayInputStream(yaml.getBytes(StandardCharsets.UTF_8))); + + assertThat(sdk).asString().contains("BaggageSpanProcessor"); + } +} diff --git a/baggage-processor/src/test/java/io/opentelemetry/contrib/baggage/processor/BaggageSpanProcessorTest.java b/baggage-processor/src/test/java/io/opentelemetry/contrib/baggage/processor/BaggageSpanProcessorTest.java index ca1180dcc..dd7ea3826 100644 --- a/baggage-processor/src/test/java/io/opentelemetry/contrib/baggage/processor/BaggageSpanProcessorTest.java +++ b/baggage-processor/src/test/java/io/opentelemetry/contrib/baggage/processor/BaggageSpanProcessorTest.java @@ -17,10 +17,10 @@ import org.mockito.junit.jupiter.MockitoExtension; @ExtendWith(MockitoExtension.class) -public class BaggageSpanProcessorTest { +class BaggageSpanProcessorTest { @Test - public void test_baggageSpanProcessor_adds_attributes_to_spans(@Mock ReadWriteSpan span) { + void test_baggageSpanProcessor_adds_attributes_to_spans(@Mock ReadWriteSpan span) { try (BaggageSpanProcessor processor = BaggageSpanProcessor.allowAllBaggageKeys()) { try (Scope ignore = Baggage.current().toBuilder().put("key", "value").build().makeCurrent()) { processor.onStart(Context.current(), span); @@ -30,7 +30,7 @@ public void test_baggageSpanProcessor_adds_attributes_to_spans(@Mock ReadWriteSp } @Test - public void test_baggageSpanProcessor_adds_attributes_to_spans_when_key_filter_matches( + void test_baggageSpanProcessor_adds_attributes_to_spans_when_key_filter_matches( @Mock ReadWriteSpan span) { try (BaggageSpanProcessor processor = new BaggageSpanProcessor(key -> key.startsWith("k"))) { try (Scope ignore = @@ -47,7 +47,7 @@ public void test_baggageSpanProcessor_adds_attributes_to_spans_when_key_filter_m } @Test - public void test_baggageSpanProcessor_adds_attributes_to_spans_when_key_filter_matches_regex( + void test_baggageSpanProcessor_adds_attributes_to_spans_when_key_filter_matches_regex( @Mock ReadWriteSpan span) { Pattern pattern = Pattern.compile("k.*"); try 
(BaggageSpanProcessor processor = diff --git a/build.gradle.kts b/build.gradle.kts index 6e586f336..2422f54ff 100644 --- a/build.gradle.kts +++ b/build.gradle.kts @@ -14,7 +14,10 @@ nexusPublishing { packageGroup.set("io.opentelemetry") repositories { + // see https://central.sonatype.org/publish/publish-portal-ossrh-staging-api/#configuration sonatype { + nexusUrl.set(uri("https://ossrh-staging-api.central.sonatype.com/service/local/")) + snapshotRepositoryUrl.set(uri("https://central.sonatype.com/repository/maven-snapshots/")) username.set(System.getenv("SONATYPE_USER")) password.set(System.getenv("SONATYPE_KEY")) } diff --git a/buildSrc/build.gradle.kts b/buildSrc/build.gradle.kts index 406d7ff5b..e963b8e8e 100644 --- a/buildSrc/build.gradle.kts +++ b/buildSrc/build.gradle.kts @@ -1,7 +1,7 @@ plugins { `kotlin-dsl` // When updating, update below in dependencies too - id("com.diffplug.spotless") version "7.0.3" + id("com.diffplug.spotless") version "8.0.0" } repositories { @@ -12,10 +12,14 @@ repositories { dependencies { // When updating, update above in plugins too - implementation("com.diffplug.spotless:spotless-plugin-gradle:7.0.3") - implementation("net.ltgt.gradle:gradle-errorprone-plugin:4.1.0") - implementation("net.ltgt.gradle:gradle-nullaway-plugin:2.2.0") - implementation("org.owasp:dependency-check-gradle:12.1.1") + implementation("com.diffplug.spotless:com.diffplug.spotless.gradle.plugin:8.0.0") + implementation("net.ltgt.errorprone:net.ltgt.errorprone.gradle.plugin:4.3.0") + implementation("net.ltgt.nullaway:net.ltgt.nullaway.gradle.plugin:2.3.0") + implementation("org.owasp.dependencycheck:org.owasp.dependencycheck.gradle.plugin:12.1.6") + implementation("ru.vyarus.animalsniffer:ru.vyarus.animalsniffer.gradle.plugin:2.0.1") + implementation("com.gradle.develocity:com.gradle.develocity.gradle.plugin:4.2.1") + implementation("me.champeau.gradle.japicmp:me.champeau.gradle.japicmp.gradle.plugin:0.4.6") + 
implementation("com.google.auto.value:auto-value-annotations:1.11.0") } spotless { diff --git a/buildSrc/src/main/kotlin/otel.animalsniffer-conventions.gradle.kts b/buildSrc/src/main/kotlin/otel.animalsniffer-conventions.gradle.kts new file mode 100644 index 000000000..3fda84f4c --- /dev/null +++ b/buildSrc/src/main/kotlin/otel.animalsniffer-conventions.gradle.kts @@ -0,0 +1,19 @@ +import ru.vyarus.gradle.plugin.animalsniffer.AnimalSniffer + +plugins { + id("otel.java-conventions") + id("ru.vyarus.animalsniffer") +} + +dependencies { + signature("com.toasttab.android:gummy-bears-api-21:0.12.0:coreLib@signature") +} + +animalsniffer { + sourceSets = listOf(java.sourceSets.main.get()) +} + +// Always having declared output makes this task properly participate in tasks up-to-date checks +tasks.withType { + reports.text.required.set(true) +} diff --git a/buildSrc/src/main/kotlin/otel.errorprone-conventions.gradle.kts b/buildSrc/src/main/kotlin/otel.errorprone-conventions.gradle.kts index 1dfc48318..113576db3 100644 --- a/buildSrc/src/main/kotlin/otel.errorprone-conventions.gradle.kts +++ b/buildSrc/src/main/kotlin/otel.errorprone-conventions.gradle.kts @@ -49,7 +49,6 @@ tasks { disable("UnnecessarilyFullyQualified") // TODO (trask) use animal sniffer - disable("Java7ApiChecker") disable("Java8ApiChecker") disable("AndroidJdkLibsChecker") @@ -79,8 +78,8 @@ tasks { // cognitive load is dubious. 
disable("YodaCondition") - // We get this warning in modules that compile for old java versions - disable("StringConcatToTextBlock") + // Requires adding compile dependency to JSpecify + disable("AddNullMarkedToPackageInfo") if (name.contains("Jmh") || name.contains("Test")) { // Allow underscore in test-type method names diff --git a/buildSrc/src/main/kotlin/otel.japicmp-conventions.gradle.kts b/buildSrc/src/main/kotlin/otel.japicmp-conventions.gradle.kts new file mode 100644 index 000000000..3905c2526 --- /dev/null +++ b/buildSrc/src/main/kotlin/otel.japicmp-conventions.gradle.kts @@ -0,0 +1,158 @@ +import com.google.auto.value.AutoValue +import japicmp.model.* +import me.champeau.gradle.japicmp.JapicmpTask +import me.champeau.gradle.japicmp.report.Violation +import me.champeau.gradle.japicmp.report.stdrules.* + + +plugins { + base + + id("me.champeau.gradle.japicmp") +} + +/** + * The latest *released* version of the project. Evaluated lazily so the work is only done if necessary. + */ +val latestReleasedVersion: String by lazy { + // hack to find the current released version of the project + val temp: Configuration = configurations.create("tempConfig") { + resolutionStrategy.cacheChangingModulesFor(0, "seconds") + resolutionStrategy.cacheDynamicVersionsFor(0, "seconds") + } + // pick aws-xray, since it's a stable module that's always there. + dependencies.add(temp.name, "io.opentelemetry.contrib:opentelemetry-aws-xray:latest.release") + val moduleVersion = configurations["tempConfig"].resolvedConfiguration.firstLevelModuleDependencies.elementAt(0).moduleVersion + configurations.remove(temp) + logger.debug("Discovered latest release version: " + moduleVersion) + moduleVersion +} + +class AllowNewAbstractMethodOnAutovalueClasses : AbstractRecordingSeenMembers() { + override fun maybeAddViolation(member: JApiCompatibility): Violation? 
{ + val allowableAutovalueChanges = setOf(JApiCompatibilityChangeType.METHOD_ABSTRACT_ADDED_TO_CLASS, + JApiCompatibilityChangeType.METHOD_ADDED_TO_PUBLIC_CLASS, JApiCompatibilityChangeType.ANNOTATION_ADDED) + if (member.compatibilityChanges.filter { !allowableAutovalueChanges.contains(it.type) }.isEmpty() && + member is JApiMethod && isAutoValueClass(member.getjApiClass())) + { + return Violation.accept(member, "Autovalue will automatically add implementation") + } + if (member.compatibilityChanges.isEmpty() && + member is JApiClass && isAutoValueClass(member)) { + return Violation.accept(member, "Autovalue class modification is allowed") + } + return null + } + + fun isAutoValueClass(japiClass: JApiClass): Boolean { + return japiClass.newClass.get().getAnnotation(AutoValue::class.java) != null || + japiClass.newClass.get().getAnnotation(AutoValue.Builder::class.java) != null + } +} + +class SourceIncompatibleRule : AbstractRecordingSeenMembers() { + override fun maybeAddViolation(member: JApiCompatibility): Violation? { + if (!member.isSourceCompatible()) { + return Violation.error(member, "Not source compatible: $member") + } + return null + } +} + +/** + * Locate the project's artifact of a particular version. + */ +fun findArtifact(version: String): File { + val existingGroup = group + try { + // Temporarily change the group name because we want to fetch an artifact with the same + // Maven coordinates as the project, which Gradle would not allow otherwise. + group = "virtual_group" + val depModule = "io.opentelemetry.contrib:${base.archivesName.get()}:$version@jar" + val depJar = "${base.archivesName.get()}-$version.jar" + val configuration: Configuration = configurations.detachedConfiguration( + dependencies.create(depModule), + ) + return files(configuration.files).filter { + it.name.equals(depJar) + }.singleFile + } finally { + group = existingGroup + } +} + +// generate the api diff report for any module that is stable and publishes a jar. 
+if (project.findProperty("otel.stable") == "true" && !project.name.startsWith("bom")) { + afterEvaluate { + tasks { + val jApiCmp by registering(JapicmpTask::class) { + dependsOn("jar") + + // the japicmp "new" version is either the user-specified one, or the locally built jar. + val apiNewVersion: String? by project + val newArtifact = apiNewVersion?.let { findArtifact(it) } + ?: file(getByName("jar").archiveFile) + newClasspath.from(files(newArtifact)) + + // only output changes, not everything + onlyModified.set(true) + + // the japicmp "old" version is either the user-specified one, or the latest release. + val apiBaseVersion: String? by project + val baselineVersion = apiBaseVersion ?: latestReleasedVersion + oldClasspath.from( + try { + files(findArtifact(baselineVersion)) + } catch (e: Exception) { + // if we can't find the baseline artifact, this is probably one that's never been published before, + // so publish the whole API. We do that by flipping this flag, and comparing the current against nothing. + onlyModified.set(false) + files() + }, + ) + + // Reproduce defaults from https://github.com/melix/japicmp-gradle-plugin/blob/09f52739ef1fccda6b4310cf3f4b19dc97377024/src/main/java/me/champeau/gradle/japicmp/report/ViolationsGenerator.java#L130 + // with some changes. + val exclusions = mutableListOf() + // Generics are not detected correctly + exclusions.add("CLASS_GENERIC_TEMPLATE_CHANGED") + // Allow new default methods on interfaces + exclusions.add("METHOD_NEW_DEFAULT") + // Allow adding default implementations for default methods + exclusions.add("METHOD_ABSTRACT_NOW_DEFAULT") + // Bug prevents recognizing default methods of superinterface. 
+ // Fixed in https://github.com/siom79/japicmp/pull/343 but not yet available in me.champeau.gradle.japicmp + exclusions.add("METHOD_ABSTRACT_ADDED_IN_IMPLEMENTED_INTERFACE") + compatibilityChangeExcludes.set(exclusions) + richReport { + addSetupRule(RecordSeenMembersSetup::class.java) + addRule(JApiChangeStatus.NEW, SourceCompatibleRule::class.java) + addRule(JApiChangeStatus.MODIFIED, SourceCompatibleRule::class.java) + addRule(JApiChangeStatus.UNCHANGED, UnchangedMemberRule::class.java) + // Allow new abstract methods on autovalue + addRule(AllowNewAbstractMethodOnAutovalueClasses::class.java) + addRule(BinaryIncompatibleRule::class.java) + // Disallow source incompatible changes, which are allowed by default for some reason + addRule(SourceIncompatibleRule::class.java) + } + + // this is needed so that we only consider the current artifact, and not dependencies + ignoreMissingClasses.set(true) + packageExcludes.addAll( + "*.internal", + "*.internal.*" + ) + annotationExcludes.add("@kotlin.Metadata") + val baseVersionString = if (apiBaseVersion == null) "latest" else baselineVersion + txtOutputFile.set( + apiNewVersion?.let { file("$rootDir/docs/apidiffs/${apiNewVersion}_vs_$baselineVersion/${base.archivesName.get()}.txt") } + ?: file("$rootDir/docs/apidiffs/current_vs_$baseVersionString/${base.archivesName.get()}.txt"), + ) + } + // have the check task depend on the api comparison task, to make it more likely it will get used. 
+ named("check") { + dependsOn(jApiCmp) + } + } + } +} diff --git a/buildSrc/src/main/kotlin/otel.java-conventions.gradle.kts b/buildSrc/src/main/kotlin/otel.java-conventions.gradle.kts index 75b9ff2f8..f512ccf2c 100644 --- a/buildSrc/src/main/kotlin/otel.java-conventions.gradle.kts +++ b/buildSrc/src/main/kotlin/otel.java-conventions.gradle.kts @@ -7,6 +7,7 @@ plugins { id("otel.errorprone-conventions") id("otel.spotless-conventions") + id("otel.japicmp-conventions") id("org.owasp.dependencycheck") } @@ -66,10 +67,23 @@ tasks { withType().configureEach { useJUnitPlatform() + val maxTestRetries = gradle.startParameter.projectProperties["maxTestRetries"]?.toInt() ?: 0 + develocity.testRetry { + // You can see tests that were retried by this mechanism in the collected test reports and build scans. + maxRetries.set(maxTestRetries) + } + testLogging { exceptionFormat = TestExceptionFormat.FULL showStandardStreams = true } + + configure { + // only care about code coverage for code in this repository + // (in particular avoiding netty classes which sometimes end up + // causing sporadic CI failures) + includes = listOf("io/opentelemetry/contrib/**") + } } withType().configureEach { @@ -93,12 +107,13 @@ plugins.withId("otel.publish-conventions") { register("generateVersionResource") { val moduleName = otelJava.moduleName val propertiesDir = moduleName.map { layout.buildDirectory.file("generated/properties/${it.replace('.', '/')}") } + val projectVersion = project.version.toString() - inputs.property("project.version", project.version.toString()) + inputs.property("project.version", projectVersion) outputs.dir(propertiesDir) doLast { - File(propertiesDir.get().get().asFile, "version.properties").writeText("contrib.version=${project.version}") + File(propertiesDir.get().get().asFile, "version.properties").writeText("contrib.version=${projectVersion}") } } } @@ -133,12 +148,12 @@ dependencies { testing { suites.withType(JvmTestSuite::class).configureEach { dependencies { - 
implementation(project(project.path)) + implementation(project()) - implementation(enforcedPlatform("org.junit:junit-bom:5.12.2")) - implementation(enforcedPlatform("org.testcontainers:testcontainers-bom:1.20.6")) - implementation(enforcedPlatform("com.google.guava:guava-bom:33.4.8-jre")) - implementation(enforcedPlatform("com.linecorp.armeria:armeria-bom:1.32.4")) + implementation(enforcedPlatform("org.junit:junit-bom:5.14.0")) + implementation(enforcedPlatform("org.testcontainers:testcontainers-bom:1.21.3")) + implementation(enforcedPlatform("com.google.guava:guava-bom:33.5.0-jre")) + implementation(enforcedPlatform("com.linecorp.armeria:armeria-bom:1.33.4")) compileOnly("com.google.auto.value:auto-value-annotations") compileOnly("com.google.errorprone:error_prone_annotations") diff --git a/buildSrc/src/main/kotlin/otel.publish-conventions.gradle.kts b/buildSrc/src/main/kotlin/otel.publish-conventions.gradle.kts index 7b922edc8..070edf183 100644 --- a/buildSrc/src/main/kotlin/otel.publish-conventions.gradle.kts +++ b/buildSrc/src/main/kotlin/otel.publish-conventions.gradle.kts @@ -57,6 +57,17 @@ publishing { developerConnection.set("scm:git:git@github.com:open-telemetry/opentelemetry-java-contrib.git") url.set("git@github.com:open-telemetry/opentelemetry-java-contrib.git") } + + withXml { + // Since 5.0 okhttp uses gradle metadata to choose either okhttp-jvm or okhttp-android. + // This does not work for maven builds that don't understand gradle metadata. They end up + // using the okhttp artifact that is an empty jar. Here we replace usages of okhttp with + // okhttp-jvm so that maven could get the actual okhttp dependency instead of the empty jar. 
+ var result = asString() + var modified = result.toString().replace(">okhttp<", ">okhttp-jvm<") + result.clear() + result.append(modified) + } } } } diff --git a/buildSrc/src/main/kotlin/otel.spotless-conventions.gradle.kts b/buildSrc/src/main/kotlin/otel.spotless-conventions.gradle.kts index b1c39dcd0..f3d387872 100644 --- a/buildSrc/src/main/kotlin/otel.spotless-conventions.gradle.kts +++ b/buildSrc/src/main/kotlin/otel.spotless-conventions.gradle.kts @@ -8,11 +8,12 @@ spotless { licenseHeaderFile(rootProject.file("buildscripts/spotless.license.java"), "(package|import|public|// Includes work from:)") target("src/**/*.java") } - plugins.withId("groovy") { - groovy { - licenseHeaderFile(rootProject.file("buildscripts/spotless.license.java"), "(package|import|class)") - } - } + // commented out for now due to incompatibility with gradle cache configuration + // plugins.withId("groovy") { + // groovy { + // licenseHeaderFile(rootProject.file("buildscripts/spotless.license.java"), "(package|import|class)") + // } + // } plugins.withId("scala") { scala { scalafmt() diff --git a/cloudfoundry-resources/README.md b/cloudfoundry-resources/README.md index 355f9ce6e..8ba62971c 100644 --- a/cloudfoundry-resources/README.md +++ b/cloudfoundry-resources/README.md @@ -17,7 +17,7 @@ This variable contains a JSON structure, which is parsed to fill the following a | cloudfoundry.space.id | space_id | | cloudfoundry.space.name | space_name | -The resource attributes follow the [CloudFoundry semantic convention.](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/attributes-registry/cloudfoundry.md). +The resource attributes follow the [CloudFoundry semantic convention.](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/resource/cloudfoundry.md). 
A description of `VCAP_APPLICATION` is available in the [CloudFoundry documentation](https://docs.cloudfoundry.org/devguide/deploy-apps/environment-variable.html#VCAP-APPLICATION). ## Component owners diff --git a/cloudfoundry-resources/build.gradle.kts b/cloudfoundry-resources/build.gradle.kts index e768f7389..d70c44500 100644 --- a/cloudfoundry-resources/build.gradle.kts +++ b/cloudfoundry-resources/build.gradle.kts @@ -9,6 +9,7 @@ otelJava.moduleName.set("io.opentelemetry.contrib.cloudfoundry.resources") dependencies { api("io.opentelemetry:opentelemetry-api") + compileOnly("io.opentelemetry:opentelemetry-api-incubator") api("io.opentelemetry:opentelemetry-sdk") compileOnly("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure-spi") @@ -19,6 +20,38 @@ dependencies { testImplementation("io.opentelemetry.semconv:opentelemetry-semconv-incubating") testImplementation("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure") + testImplementation("io.opentelemetry:opentelemetry-api-incubator") testImplementation("io.opentelemetry:opentelemetry-sdk-testing") testImplementation("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure-spi") + testImplementation("io.opentelemetry:opentelemetry-exporter-logging") + testImplementation("io.opentelemetry:opentelemetry-sdk-extension-incubator") +} + +tasks { + withType().configureEach { + environment( + "VCAP_APPLICATION" to """ + { + "application_id": "0193a038-e615-7e5e-92ca-f4bcd7ba0a25", + "application_name": "cf-app-name", + "application_uris": [ + "testapp.example.com" + ], + "cf_api": "https://api.cf.example.com", + "limits": { + "fds": 256 + }, + "instance_index": 1, + "organization_id": "0193a375-8d8e-7e0c-a832-01ce9ded40dc", + "organization_name": "cf-org-name", + "process_id": "0193a4e3-8fd3-71b9-9fe3-5640c53bf1e2", + "process_type": "web", + "space_id": "0193a7e7-da17-7ea4-8940-b1e07b401b16", + "space_name": "cf-space-name", + "users": null + } + """.trimIndent(), + ) + 
jvmArgs("-Dotel.experimental.config.file=${project.projectDir.resolve("src/test/resources/declarative-config.yaml")}") + } } diff --git a/cloudfoundry-resources/src/main/java/io/opentelemetry/contrib/cloudfoundry/resources/CloudFoundryResource.java b/cloudfoundry-resources/src/main/java/io/opentelemetry/contrib/cloudfoundry/resources/CloudFoundryResource.java index 7d6313928..c8e7bd2f2 100644 --- a/cloudfoundry-resources/src/main/java/io/opentelemetry/contrib/cloudfoundry/resources/CloudFoundryResource.java +++ b/cloudfoundry-resources/src/main/java/io/opentelemetry/contrib/cloudfoundry/resources/CloudFoundryResource.java @@ -5,6 +5,8 @@ package io.opentelemetry.contrib.cloudfoundry.resources; +import static io.opentelemetry.api.common.AttributeKey.stringKey; + import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.JsonToken; @@ -22,24 +24,22 @@ public final class CloudFoundryResource { private static final String ENV_VCAP_APPLICATION = "VCAP_APPLICATION"; // copied from CloudfoundryIncubatingAttributes - private static final AttributeKey CLOUDFOUNDRY_APP_ID = - AttributeKey.stringKey("cloudfoundry.app.id"); + private static final AttributeKey CLOUDFOUNDRY_APP_ID = stringKey("cloudfoundry.app.id"); private static final AttributeKey CLOUDFOUNDRY_APP_INSTANCE_ID = - AttributeKey.stringKey("cloudfoundry.app.instance.id"); + stringKey("cloudfoundry.app.instance.id"); private static final AttributeKey CLOUDFOUNDRY_APP_NAME = - AttributeKey.stringKey("cloudfoundry.app.name"); - private static final AttributeKey CLOUDFOUNDRY_ORG_ID = - AttributeKey.stringKey("cloudfoundry.org.id"); + stringKey("cloudfoundry.app.name"); + private static final AttributeKey CLOUDFOUNDRY_ORG_ID = stringKey("cloudfoundry.org.id"); private static final AttributeKey CLOUDFOUNDRY_ORG_NAME = - AttributeKey.stringKey("cloudfoundry.org.name"); + stringKey("cloudfoundry.org.name"); private static final AttributeKey 
CLOUDFOUNDRY_PROCESS_ID = - AttributeKey.stringKey("cloudfoundry.process.id"); + stringKey("cloudfoundry.process.id"); private static final AttributeKey CLOUDFOUNDRY_PROCESS_TYPE = - AttributeKey.stringKey("cloudfoundry.process.type"); + stringKey("cloudfoundry.process.type"); private static final AttributeKey CLOUDFOUNDRY_SPACE_ID = - AttributeKey.stringKey("cloudfoundry.space.id"); + stringKey("cloudfoundry.space.id"); private static final AttributeKey CLOUDFOUNDRY_SPACE_NAME = - AttributeKey.stringKey("cloudfoundry.space.name"); + stringKey("cloudfoundry.space.name"); private static final Logger LOG = Logger.getLogger(CloudFoundryResource.class.getName()); private static final JsonFactory JSON_FACTORY = new JsonFactory(); private static final Resource INSTANCE = buildResource(System::getenv); diff --git a/cloudfoundry-resources/src/main/java/io/opentelemetry/contrib/cloudfoundry/resources/CloudFoundryResourceDetector.java b/cloudfoundry-resources/src/main/java/io/opentelemetry/contrib/cloudfoundry/resources/CloudFoundryResourceDetector.java new file mode 100644 index 000000000..357d83533 --- /dev/null +++ b/cloudfoundry-resources/src/main/java/io/opentelemetry/contrib/cloudfoundry/resources/CloudFoundryResourceDetector.java @@ -0,0 +1,28 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.cloudfoundry.resources; + +import io.opentelemetry.api.incubator.config.DeclarativeConfigProperties; +import io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider; +import io.opentelemetry.sdk.resources.Resource; + +public final class CloudFoundryResourceDetector implements ComponentProvider { + + @Override + public Class getType() { + return Resource.class; + } + + @Override + public String getName() { + return "cloud_foundry"; + } + + @Override + public Resource create(DeclarativeConfigProperties config) { + return CloudFoundryResource.get(); + } +} diff --git 
a/cloudfoundry-resources/src/main/java/io/opentelemetry/contrib/cloudfoundry/resources/CloudFoundryResourceProvider.java b/cloudfoundry-resources/src/main/java/io/opentelemetry/contrib/cloudfoundry/resources/CloudFoundryResourceProvider.java index e3f3e3c64..992eb93dc 100644 --- a/cloudfoundry-resources/src/main/java/io/opentelemetry/contrib/cloudfoundry/resources/CloudFoundryResourceProvider.java +++ b/cloudfoundry-resources/src/main/java/io/opentelemetry/contrib/cloudfoundry/resources/CloudFoundryResourceProvider.java @@ -9,7 +9,7 @@ import io.opentelemetry.sdk.autoconfigure.spi.ResourceProvider; import io.opentelemetry.sdk.resources.Resource; -public class CloudFoundryResourceProvider implements ResourceProvider { +public final class CloudFoundryResourceProvider implements ResourceProvider { @Override public Resource createResource(ConfigProperties configProperties) { diff --git a/cloudfoundry-resources/src/main/resources/META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider b/cloudfoundry-resources/src/main/resources/META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider new file mode 100644 index 000000000..96092ce3d --- /dev/null +++ b/cloudfoundry-resources/src/main/resources/META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider @@ -0,0 +1 @@ +io.opentelemetry.contrib.cloudfoundry.resources.CloudFoundryResourceDetector diff --git a/cloudfoundry-resources/src/test/java/io/opentelemetry/contrib/cloudfoundry/resources/CloudFoundryResourceTest.java b/cloudfoundry-resources/src/test/java/io/opentelemetry/contrib/cloudfoundry/resources/CloudFoundryResourceTest.java index 1c533cd8a..96474c966 100644 --- a/cloudfoundry-resources/src/test/java/io/opentelemetry/contrib/cloudfoundry/resources/CloudFoundryResourceTest.java +++ b/cloudfoundry-resources/src/test/java/io/opentelemetry/contrib/cloudfoundry/resources/CloudFoundryResourceTest.java @@ -5,7 +5,9 @@ package 
io.opentelemetry.contrib.cloudfoundry.resources; +import static java.util.stream.Collectors.joining; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; import io.opentelemetry.sdk.resources.Resource; import io.opentelemetry.semconv.SchemaUrls; @@ -18,8 +20,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; -import java.util.stream.Collectors; -import org.assertj.core.api.Assertions; import org.junit.jupiter.api.Test; class CloudFoundryResourceTest { @@ -36,11 +36,11 @@ private static String loadVcapApplicationSample(String filename) { if (is != null) { return new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8)) .lines() - .collect(Collectors.joining()); + .collect(joining()); } - Assertions.fail("Cannot load resource " + filename); + fail("Cannot load resource " + filename); } catch (IOException e) { - Assertions.fail("Error reading " + filename); + fail("Error reading " + filename); } return ""; } diff --git a/cloudfoundry-resources/src/test/java/io/opentelemetry/contrib/cloudfoundry/resources/ResourceComponentProviderTest.java b/cloudfoundry-resources/src/test/java/io/opentelemetry/contrib/cloudfoundry/resources/ResourceComponentProviderTest.java new file mode 100644 index 000000000..b4b659156 --- /dev/null +++ b/cloudfoundry-resources/src/test/java/io/opentelemetry/contrib/cloudfoundry/resources/ResourceComponentProviderTest.java @@ -0,0 +1,31 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.cloudfoundry.resources; + +import static org.assertj.core.api.Assertions.assertThat; + +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.sdk.autoconfigure.AutoConfiguredOpenTelemetrySdk; +import io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions; +import org.assertj.core.api.InstanceOfAssertFactory; +import org.junit.jupiter.api.Test; + +class 
ResourceComponentProviderTest { + @Test + void endToEnd() { + assertThat( + AutoConfiguredOpenTelemetrySdk.builder() + .build() + .getOpenTelemetrySdk() + .getSdkTracerProvider()) + .extracting("sharedState") + .extracting("resource") + .extracting( + "attributes", + new InstanceOfAssertFactory<>(Attributes.class, OpenTelemetryAssertions::assertThat)) + .containsEntry("cloudfoundry.app.name", "cf-app-name"); + } +} diff --git a/cloudfoundry-resources/src/test/resources/declarative-config.yaml b/cloudfoundry-resources/src/test/resources/declarative-config.yaml new file mode 100644 index 000000000..dc6ddf5d3 --- /dev/null +++ b/cloudfoundry-resources/src/test/resources/declarative-config.yaml @@ -0,0 +1,10 @@ +file_format: "1.0-rc.1" +resource: + detection/development: + detectors: + - cloud_foundry: +tracer_provider: + processors: + - simple: + exporter: + console: diff --git a/compressors/compressor-zstd/build.gradle.kts b/compressors/compressor-zstd/build.gradle.kts index 63db3ed20..acc7f3fe9 100644 --- a/compressors/compressor-zstd/build.gradle.kts +++ b/compressors/compressor-zstd/build.gradle.kts @@ -9,7 +9,7 @@ otelJava.moduleName.set("io.opentelemetry.contrib.compressor.zstd") dependencies { api("io.opentelemetry:opentelemetry-exporter-common") - implementation("com.github.luben:zstd-jni:1.5.7-2") + implementation("com.github.luben:zstd-jni:1.5.7-5") testImplementation("io.opentelemetry:opentelemetry-sdk-testing") testImplementation("io.opentelemetry:opentelemetry-exporter-otlp") diff --git a/consistent-sampling/README.md b/consistent-sampling/README.md index 4f848eb3a..49f4c0565 100644 --- a/consistent-sampling/README.md +++ b/consistent-sampling/README.md @@ -5,7 +5,7 @@ There are two major components included here. ## Original proposal implementation The original specification for consistent probability sampling is defined by - + and . 
It supports sampling probabilities that are power of 2 (1, 1/2, 1/4, ...), and uses 8-bit `r-value` and 8-bit `p-value` in tracestate. @@ -14,18 +14,18 @@ The implementation of this proposal is contained by the package `io/opentelemetr * **ConsistentSampler**: abstract base class of all consistent sampler implementations below * **ConsistentAlwaysOffSampler**: - see + see * **ConsistentAlwaysOnSampler**: - see + see * **ConsistentComposedAndSampler**: allows combining two consistent samplers and samples when both samplers would sample * **ConsistentComposedOrSampler**: allows combining two consistent sampler and samples when at least one of both samplers would sample, - see + see * **ConsistentParentBasedSampler**: - see + see * **ConsistentProbabilityBasedSampler**: - see + see * **ConsistentRateLimitingSampler**: a rate limiting sampler based on exponential smoothing that dynamically adjusts the sampling probability based on the estimated rate of spans occurring to satisfy a given rate of sampled spans diff --git a/consistent-sampling/build.gradle.kts b/consistent-sampling/build.gradle.kts index 88cdf543a..5fc4135bb 100644 --- a/consistent-sampling/build.gradle.kts +++ b/consistent-sampling/build.gradle.kts @@ -9,6 +9,17 @@ otelJava.moduleName.set("io.opentelemetry.contrib.sampler") dependencies { api("io.opentelemetry:opentelemetry-sdk-trace") api("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure-spi") - testImplementation("org.hipparchus:hipparchus-core:4.0.1") - testImplementation("org.hipparchus:hipparchus-stat:4.0.1") + testImplementation("org.hipparchus:hipparchus-core:4.0.2") + testImplementation("org.hipparchus:hipparchus-stat:4.0.2") +} + +tasks { + withType().configureEach { + develocity.testRetry { + // TODO (trask) fix flaky tests and remove this workaround + if (System.getenv().containsKey("CI")) { + maxRetries.set(5) + } + } + } } diff --git 
a/consistent-sampling/src/main/java/io/opentelemetry/contrib/sampler/consistent/ConsistentRateLimitingSampler.java b/consistent-sampling/src/main/java/io/opentelemetry/contrib/sampler/consistent/ConsistentRateLimitingSampler.java index 9c2b93f74..14d6dfee4 100644 --- a/consistent-sampling/src/main/java/io/opentelemetry/contrib/sampler/consistent/ConsistentRateLimitingSampler.java +++ b/consistent-sampling/src/main/java/io/opentelemetry/contrib/sampler/consistent/ConsistentRateLimitingSampler.java @@ -74,7 +74,7 @@ private static final class State { private final double effectiveWindowNanos; private final long lastNanoTime; - public State(double effectiveWindowCount, double effectiveWindowNanos, long lastNanoTime) { + State(double effectiveWindowCount, double effectiveWindowNanos, long lastNanoTime) { this.effectiveWindowCount = effectiveWindowCount; this.effectiveWindowNanos = effectiveWindowNanos; this.lastNanoTime = lastNanoTime; diff --git a/consistent-sampling/src/main/java/io/opentelemetry/contrib/sampler/consistent/ConsistentReservoirSamplingSpanProcessor.java b/consistent-sampling/src/main/java/io/opentelemetry/contrib/sampler/consistent/ConsistentReservoirSamplingSpanProcessor.java index c522bf1c6..67defd745 100644 --- a/consistent-sampling/src/main/java/io/opentelemetry/contrib/sampler/consistent/ConsistentReservoirSamplingSpanProcessor.java +++ b/consistent-sampling/src/main/java/io/opentelemetry/contrib/sampler/consistent/ConsistentReservoirSamplingSpanProcessor.java @@ -55,7 +55,7 @@ private static final class ReadableSpanWithPriority { private final int rval; private final long priority; - public static ReadableSpanWithPriority create( + static ReadableSpanWithPriority create( ReadableSpan readableSpan, RandomGenerator randomGenerator) { String otelTraceStateString = readableSpan.getSpanContext().getTraceState().get(OtelTraceState.TRACE_STATE_KEY); @@ -201,7 +201,7 @@ private static final class Reservoir { private final PriorityQueue queue; private 
final RandomGenerator randomGenerator; - public Reservoir(int reservoirSize, RandomGenerator randomGenerator) { + Reservoir(int reservoirSize, RandomGenerator randomGenerator) { if (reservoirSize < 1) { throw new IllegalArgumentException(); } @@ -211,7 +211,7 @@ public Reservoir(int reservoirSize, RandomGenerator randomGenerator) { this.randomGenerator = randomGenerator; } - public void add(ReadableSpanWithPriority readableSpanWithPriority) { + void add(ReadableSpanWithPriority readableSpanWithPriority) { if (queue.size() < reservoirSize) { queue.add(readableSpanWithPriority); @@ -232,7 +232,7 @@ public void add(ReadableSpanWithPriority readableSpanWithPriority) { } } - public List getResult() { + List getResult() { if (numberOfDiscardedSpansWithMaxDiscardedRValue == 0) { return queue.stream().map(x -> x.readableSpan.toSpanData()).collect(Collectors.toList()); @@ -294,7 +294,7 @@ public List getResult() { return result; } - public boolean isEmpty() { + boolean isEmpty() { return queue.isEmpty(); } } diff --git a/consistent-sampling/src/main/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentFixedThresholdSampler.java b/consistent-sampling/src/main/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentFixedThresholdSampler.java index f2e92651c..253edf709 100644 --- a/consistent-sampling/src/main/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentFixedThresholdSampler.java +++ b/consistent-sampling/src/main/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentFixedThresholdSampler.java @@ -5,42 +5,20 @@ package io.opentelemetry.contrib.sampler.consistent56; -import static io.opentelemetry.contrib.sampler.consistent56.ConsistentSamplingUtil.calculateSamplingProbability; -import static io.opentelemetry.contrib.sampler.consistent56.ConsistentSamplingUtil.checkThreshold; -import static io.opentelemetry.contrib.sampler.consistent56.ConsistentSamplingUtil.getInvalidThreshold; -import static 
io.opentelemetry.contrib.sampler.consistent56.ConsistentSamplingUtil.getMaxThreshold; +import static io.opentelemetry.contrib.sampler.consistent56.ConsistentSamplingUtil.calculateThreshold; -import io.opentelemetry.api.common.Attributes; -import io.opentelemetry.api.trace.SpanKind; -import io.opentelemetry.context.Context; -import io.opentelemetry.sdk.trace.data.LinkData; -import java.util.List; - -public class ConsistentFixedThresholdSampler extends ConsistentSampler { +public class ConsistentFixedThresholdSampler extends ConsistentThresholdSampler { private final long threshold; private final String description; protected ConsistentFixedThresholdSampler(long threshold) { - checkThreshold(threshold); - this.threshold = threshold; - - String thresholdString; - if (threshold == getMaxThreshold()) { - thresholdString = "max"; - } else { - thresholdString = - ConsistentSamplingUtil.appendLast56BitHexEncodedWithoutTrailingZeros( - new StringBuilder(), threshold) - .toString(); - } + this.threshold = getThreshold(threshold); + this.description = getThresholdDescription(threshold); + } - this.description = - "ConsistentFixedThresholdSampler{threshold=" - + thresholdString - + ", sampling probability=" - + calculateSamplingProbability(threshold) - + "}"; + protected ConsistentFixedThresholdSampler(double samplingProbability) { + this(calculateThreshold(samplingProbability)); } @Override @@ -49,18 +27,7 @@ public String getDescription() { } @Override - public SamplingIntent getSamplingIntent( - Context parentContext, - String name, - SpanKind spanKind, - Attributes attributes, - List parentLinks) { - - return () -> { - if (threshold == getMaxThreshold()) { - return getInvalidThreshold(); - } - return threshold; - }; + public long getThreshold() { + return threshold; } } diff --git a/consistent-sampling/src/main/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentRateLimitingSampler.java 
b/consistent-sampling/src/main/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentRateLimitingSampler.java index b58bee96a..0075c5692 100644 --- a/consistent-sampling/src/main/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentRateLimitingSampler.java +++ b/consistent-sampling/src/main/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentRateLimitingSampler.java @@ -104,7 +104,7 @@ private static final class State { private final double effectiveDelegateProbability; private final long lastNanoTime; - public State( + State( double effectiveWindowCount, double effectiveWindowNanos, long lastNanoTime, diff --git a/consistent-sampling/src/main/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentSampler.java b/consistent-sampling/src/main/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentSampler.java index 1b2cedf08..22ee83b8c 100644 --- a/consistent-sampling/src/main/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentSampler.java +++ b/consistent-sampling/src/main/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentSampler.java @@ -55,6 +55,17 @@ public static ConsistentSampler probabilityBased(double samplingProbability) { return new ConsistentFixedThresholdSampler(threshold); } + /** + * Returns a {@link ConsistentSampler} that samples each span with a known probability, where the + * probablity can be dynamically updated. + * + * @param samplingProbability the sampling probability + * @return a sampler + */ + public static ConsistentSampler updateableProbabilityBased(double samplingProbability) { + return new ConsistentVariableThresholdSampler(samplingProbability); + } + /** * Returns a new {@link ConsistentSampler} that respects the sampling decision of the parent span * or falls-back to the given sampler if it is a root span. 
@@ -186,10 +197,19 @@ public final SamplingResult shouldSample( boolean isSampled; boolean isAdjustedCountCorrect; if (isValidThreshold(threshold)) { - long randomness = getRandomness(otelTraceState, traceId); - isSampled = threshold <= randomness; isAdjustedCountCorrect = intent.isAdjustedCountReliable(); - } else { // DROP + // determine the randomness value to use + long randomness; + if (isAdjustedCountCorrect) { + randomness = getRandomness(otelTraceState, traceId); + } else { + // We cannot assume any particular distribution of the provided trace randomness, + // because the sampling decision may depend directly or indirectly on the randomness value; + // however, we still want to sample with probability corresponding to the obtained threshold + randomness = RandomValueGenerators.getDefault().generate(traceId); + } + isSampled = threshold <= randomness; + } else { // invalid threshold, DROP isSampled = false; isAdjustedCountCorrect = false; } diff --git a/consistent-sampling/src/main/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentThresholdSampler.java b/consistent-sampling/src/main/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentThresholdSampler.java new file mode 100644 index 000000000..63c1dbeaa --- /dev/null +++ b/consistent-sampling/src/main/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentThresholdSampler.java @@ -0,0 +1,60 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.sampler.consistent56; + +import static io.opentelemetry.contrib.sampler.consistent56.ConsistentSamplingUtil.appendLast56BitHexEncodedWithoutTrailingZeros; +import static io.opentelemetry.contrib.sampler.consistent56.ConsistentSamplingUtil.calculateSamplingProbability; +import static io.opentelemetry.contrib.sampler.consistent56.ConsistentSamplingUtil.checkThreshold; +import static io.opentelemetry.contrib.sampler.consistent56.ConsistentSamplingUtil.getInvalidThreshold; 
+import static io.opentelemetry.contrib.sampler.consistent56.ConsistentSamplingUtil.getMaxThreshold; + +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.SpanKind; +import io.opentelemetry.context.Context; +import io.opentelemetry.sdk.trace.data.LinkData; +import java.util.List; + +public abstract class ConsistentThresholdSampler extends ConsistentSampler { + + protected abstract long getThreshold(); + + protected static long getThreshold(long threshold) { + checkThreshold(threshold); + return threshold; + } + + protected static String getThresholdDescription(long threshold) { + String thresholdString; + if (threshold == getMaxThreshold()) { + thresholdString = "max"; + } else { + thresholdString = + appendLast56BitHexEncodedWithoutTrailingZeros(new StringBuilder(), threshold).toString(); + } + + return "ConsistentFixedThresholdSampler{threshold=" + + thresholdString + + ", sampling probability=" + + calculateSamplingProbability(threshold) + + "}"; + } + + @Override + public SamplingIntent getSamplingIntent( + Context parentContext, + String name, + SpanKind spanKind, + Attributes attributes, + List parentLinks) { + + return () -> { + if (getThreshold() == getMaxThreshold()) { + return getInvalidThreshold(); + } + return getThreshold(); + }; + } +} diff --git a/consistent-sampling/src/main/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentVariableThresholdSampler.java b/consistent-sampling/src/main/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentVariableThresholdSampler.java new file mode 100644 index 000000000..1558e961c --- /dev/null +++ b/consistent-sampling/src/main/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentVariableThresholdSampler.java @@ -0,0 +1,56 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.sampler.consistent56; + +import static 
io.opentelemetry.contrib.sampler.consistent56.ConsistentSamplingUtil.calculateSamplingProbability; +import static io.opentelemetry.contrib.sampler.consistent56.ConsistentSamplingUtil.calculateThreshold; +import static io.opentelemetry.contrib.sampler.consistent56.ConsistentSamplingUtil.checkThreshold; +import static io.opentelemetry.contrib.sampler.consistent56.ConsistentSamplingUtil.getMaxThreshold; + +public class ConsistentVariableThresholdSampler extends ConsistentThresholdSampler { + + private volatile long threshold; + private volatile String description = ""; + + protected ConsistentVariableThresholdSampler(double samplingProbability) { + setSamplingProbability(samplingProbability); + } + + @Override + public String getDescription() { + return description; + } + + @Override + public long getThreshold() { + return threshold; + } + + public void setSamplingProbability(double samplingProbability) { + long threshold = calculateThreshold(samplingProbability); + checkThreshold(threshold); + this.threshold = threshold; + + String thresholdString; + if (threshold == getMaxThreshold()) { + thresholdString = "max"; + } else { + thresholdString = + ConsistentSamplingUtil.appendLast56BitHexEncodedWithoutTrailingZeros( + new StringBuilder(), threshold) + .toString(); + } + + // tiny eventual consistency where the description would be out of date with the threshold, + // but this doesn't really matter + this.description = + "ConsistentVariableThresholdSampler{threshold=" + + thresholdString + + ", sampling probability=" + + calculateSamplingProbability(threshold) + + "}"; + } +} diff --git a/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent/ConsistentProbabilityBasedSamplerTest.java b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent/ConsistentProbabilityBasedSamplerTest.java index 4b9d3e425..a05506bc8 100644 --- 
a/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent/ConsistentProbabilityBasedSamplerTest.java +++ b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent/ConsistentProbabilityBasedSamplerTest.java @@ -6,7 +6,6 @@ package io.opentelemetry.contrib.sampler.consistent; import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertTrue; import io.opentelemetry.api.common.Attributes; import io.opentelemetry.api.trace.SpanKind; @@ -25,7 +24,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -public class ConsistentProbabilityBasedSamplerTest { +class ConsistentProbabilityBasedSamplerTest { private Context parentContext; private String traceId; @@ -63,8 +62,8 @@ private void test(SplittableRandom rng, double samplingProbability) { .getUpdatedTraceState(TraceState.getDefault()) .get(OtelTraceState.TRACE_STATE_KEY); OtelTraceState traceState = OtelTraceState.parse(traceStateString); - assertTrue(traceState.hasValidR()); - assertTrue(traceState.hasValidP()); + assertThat(traceState.hasValidR()).isTrue(); + assertThat(traceState.hasValidP()).isTrue(); observedPvalues.merge(traceState.getP(), 1L, Long::sum); } } @@ -72,7 +71,7 @@ private void test(SplittableRandom rng, double samplingProbability) { } @Test - public void test() { + void test() { // fix seed to get reproducible results SplittableRandom random = new SplittableRandom(0); diff --git a/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent/ConsistentReservoirSamplingSpanProcessorTest.java b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent/ConsistentReservoirSamplingSpanProcessorTest.java index 476a31983..1415d0f6f 100644 --- a/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent/ConsistentReservoirSamplingSpanProcessorTest.java +++ 
b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent/ConsistentReservoirSamplingSpanProcessorTest.java @@ -11,7 +11,6 @@ import static org.assertj.core.api.AssertionsForClassTypes.assertThatCode; import static org.assertj.core.api.AssertionsForClassTypes.assertThatThrownBy; import static org.awaitility.Awaitility.await; -import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyList; import static org.mockito.ArgumentMatchers.argThat; @@ -125,7 +124,7 @@ public CompletableResultCode shutdown() { return CompletableResultCode.ofSuccess(); } - public void reset() { + void reset() { this.countDownLatch = new CountDownLatch(numberOfSpansToWaitFor); } } @@ -566,8 +565,8 @@ private void testConsistentSampling( String traceStateString = spanData.getSpanContext().getTraceState().get(OtelTraceState.TRACE_STATE_KEY); OtelTraceState traceState = OtelTraceState.parse(traceStateString); - assertTrue(traceState.hasValidR()); - assertTrue(traceState.hasValidP()); + assertThat(traceState.hasValidR()).isTrue(); + assertThat(traceState.hasValidP()).isTrue(); observedPvalues.merge(traceState.getP(), 1L, Long::sum); totalAdjustedCount += 1L << traceState.getP(); spanNameCounts.merge(spanData.getName(), 1L, Long::sum); diff --git a/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent/ConsistentSamplerTest.java b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent/ConsistentSamplerTest.java index 082ac3068..1a61868c8 100644 --- a/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent/ConsistentSamplerTest.java +++ b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent/ConsistentSamplerTest.java @@ -8,9 +8,7 @@ import static io.opentelemetry.contrib.sampler.consistent.OtelTraceState.getInvalidP; import static 
io.opentelemetry.contrib.sampler.consistent.OtelTraceState.getInvalidR; import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import io.opentelemetry.api.common.Attributes; import io.opentelemetry.api.trace.Span; @@ -32,45 +30,50 @@ class ConsistentSamplerTest { @Test void testGetSamplingRate() { - assertThrows( - IllegalArgumentException.class, () -> ConsistentSampler.getSamplingProbability(-1)); + assertThatThrownBy(() -> ConsistentSampler.getSamplingProbability(-1)) + .isInstanceOf(IllegalArgumentException.class); for (int i = 0; i < OtelTraceState.getMaxP() - 1; i += 1) { - assertEquals(Math.pow(0.5, i), ConsistentSampler.getSamplingProbability(i)); + assertThat(ConsistentSampler.getSamplingProbability(i)).isEqualTo(Math.pow(0.5, i)); } - assertEquals(0., ConsistentSampler.getSamplingProbability(OtelTraceState.getMaxP())); - assertThrows( - IllegalArgumentException.class, - () -> ConsistentSampler.getSamplingProbability(OtelTraceState.getMaxP() + 1)); + assertThat(ConsistentSampler.getSamplingProbability(OtelTraceState.getMaxP())).isEqualTo(0.); + assertThatThrownBy(() -> ConsistentSampler.getSamplingProbability(OtelTraceState.getMaxP() + 1)) + .isInstanceOf(IllegalArgumentException.class); } @Test void testGetLowerBoundP() { - assertEquals(0, ConsistentSampler.getLowerBoundP(1.0)); - assertEquals(0, ConsistentSampler.getLowerBoundP(Math.nextDown(1.0))); + assertThat(ConsistentSampler.getLowerBoundP(1.0)).isEqualTo(0); + assertThat(ConsistentSampler.getLowerBoundP(Math.nextDown(1.0))).isEqualTo(0); for (int i = 1; i < OtelTraceState.getMaxP() - 1; i += 1) { double samplingProbability = Math.pow(0.5, i); - assertEquals(i, ConsistentSampler.getLowerBoundP(samplingProbability)); - assertEquals(i - 1, 
ConsistentSampler.getLowerBoundP(Math.nextUp(samplingProbability))); - assertEquals(i, ConsistentSampler.getLowerBoundP(Math.nextDown(samplingProbability))); + assertThat(ConsistentSampler.getLowerBoundP(samplingProbability)).isEqualTo(i); + assertThat(ConsistentSampler.getLowerBoundP(Math.nextUp(samplingProbability))) + .isEqualTo(i - 1); + assertThat(ConsistentSampler.getLowerBoundP(Math.nextDown(samplingProbability))).isEqualTo(i); } - assertEquals(OtelTraceState.getMaxP() - 1, ConsistentSampler.getLowerBoundP(Double.MIN_NORMAL)); - assertEquals(OtelTraceState.getMaxP() - 1, ConsistentSampler.getLowerBoundP(Double.MIN_VALUE)); - assertEquals(OtelTraceState.getMaxP(), ConsistentSampler.getLowerBoundP(0.0)); + assertThat(ConsistentSampler.getLowerBoundP(Double.MIN_NORMAL)) + .isEqualTo(OtelTraceState.getMaxP() - 1); + assertThat(ConsistentSampler.getLowerBoundP(Double.MIN_VALUE)) + .isEqualTo(OtelTraceState.getMaxP() - 1); + assertThat(ConsistentSampler.getLowerBoundP(0.0)).isEqualTo(OtelTraceState.getMaxP()); } @Test void testGetUpperBoundP() { - assertEquals(0, ConsistentSampler.getUpperBoundP(1.0)); - assertEquals(1, ConsistentSampler.getUpperBoundP(Math.nextDown(1.0))); + assertThat(ConsistentSampler.getUpperBoundP(1.0)).isEqualTo(0); + assertThat(ConsistentSampler.getUpperBoundP(Math.nextDown(1.0))).isEqualTo(1); for (int i = 1; i < OtelTraceState.getMaxP() - 1; i += 1) { double samplingProbability = Math.pow(0.5, i); - assertEquals(i, ConsistentSampler.getUpperBoundP(samplingProbability)); - assertEquals(i, ConsistentSampler.getUpperBoundP(Math.nextUp(samplingProbability))); - assertEquals(i + 1, ConsistentSampler.getUpperBoundP(Math.nextDown(samplingProbability))); + assertThat(ConsistentSampler.getUpperBoundP(samplingProbability)).isEqualTo(i); + assertThat(ConsistentSampler.getUpperBoundP(Math.nextUp(samplingProbability))).isEqualTo(i); + assertThat(ConsistentSampler.getUpperBoundP(Math.nextDown(samplingProbability))) + .isEqualTo(i + 1); } - 
assertEquals(OtelTraceState.getMaxP(), ConsistentSampler.getUpperBoundP(Double.MIN_NORMAL)); - assertEquals(OtelTraceState.getMaxP(), ConsistentSampler.getUpperBoundP(Double.MIN_VALUE)); - assertEquals(OtelTraceState.getMaxP(), ConsistentSampler.getUpperBoundP(0.0)); + assertThat(ConsistentSampler.getUpperBoundP(Double.MIN_NORMAL)) + .isEqualTo(OtelTraceState.getMaxP()); + assertThat(ConsistentSampler.getUpperBoundP(Double.MIN_VALUE)) + .isEqualTo(OtelTraceState.getMaxP()); + assertThat(ConsistentSampler.getUpperBoundP(0.0)).isEqualTo(OtelTraceState.getMaxP()); } @Test @@ -168,18 +171,18 @@ private static void assertConsistentSampling( SamplingResult samplingResult = sampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks); - assertEquals(expectSampled, getSampledFlag(samplingResult)); + assertThat(getSampledFlag(samplingResult)).isEqualTo(expectSampled); OptionalInt p = getP(samplingResult, parentContext); if (OtelTraceState.isValidP(expectedP)) { - assertEquals(expectedP, p.getAsInt()); + assertThat(p.getAsInt()).isEqualTo(expectedP); } else { - assertFalse(p.isPresent()); + assertThat(p.isPresent()).isFalse(); } OptionalInt r = getR(samplingResult, parentContext); if (OtelTraceState.isValidR(expectedR)) { - assertEquals(expectedR, r.getAsInt()); + assertThat(r.getAsInt()).isEqualTo(expectedR); } else { - assertFalse(r.isPresent()); + assertThat(r.isPresent()).isFalse(); } } diff --git a/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent/OtelTraceStateTest.java b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent/OtelTraceStateTest.java index a6fd85d47..fbb6b6dc7 100644 --- a/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent/OtelTraceStateTest.java +++ b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent/OtelTraceStateTest.java @@ -5,75 +5,72 @@ package io.opentelemetry.contrib.sampler.consistent; -import static 
org.junit.jupiter.api.Assertions.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; import java.util.stream.Collectors; import java.util.stream.Stream; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; -public class OtelTraceStateTest { +class OtelTraceStateTest { private static String getXString(int len) { return Stream.generate(() -> "X").limit(len).collect(Collectors.joining()); } @Test - public void test() { + void test() { - Assertions.assertEquals("", OtelTraceState.parse("").serialize()); - assertEquals("", OtelTraceState.parse("").serialize()); + assertThat(OtelTraceState.parse("").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("").serialize()).isEqualTo(""); - assertEquals("", OtelTraceState.parse("a").serialize()); - assertEquals("", OtelTraceState.parse("#").serialize()); - assertEquals("", OtelTraceState.parse(" ").serialize()); + assertThat(OtelTraceState.parse("a").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("#").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse(" ").serialize()).isEqualTo(""); - assertEquals("p:5", OtelTraceState.parse("p:5").serialize()); - assertEquals("p:63", OtelTraceState.parse("p:63").serialize()); - assertEquals("", OtelTraceState.parse("p:64").serialize()); - assertEquals("", OtelTraceState.parse("p:5;").serialize()); - assertEquals("", OtelTraceState.parse("p:99").serialize()); - assertEquals("", OtelTraceState.parse("p:").serialize()); - assertEquals("", OtelTraceState.parse("p:232").serialize()); - assertEquals("", OtelTraceState.parse("x;p:5").serialize()); - assertEquals("", OtelTraceState.parse("p:5;x").serialize()); - assertEquals("p:5;x:3", OtelTraceState.parse("x:3;p:5").serialize()); - assertEquals("p:5;x:3", OtelTraceState.parse("p:5;x:3").serialize()); - assertEquals("", OtelTraceState.parse("p:5;x:3;").serialize()); - assertEquals( - "p:5;a:" + getXString(246) + ";x:3", - OtelTraceState.parse("a:" + getXString(246) + 
";p:5;x:3").serialize()); - assertEquals("", OtelTraceState.parse("a:" + getXString(247) + ";p:5;x:3").serialize()); + assertThat(OtelTraceState.parse("p:5").serialize()).isEqualTo("p:5"); + assertThat(OtelTraceState.parse("p:63").serialize()).isEqualTo("p:63"); + assertThat(OtelTraceState.parse("p:64").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("p:5;").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("p:99").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("p:").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("p:232").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("x;p:5").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("p:5;x").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("x:3;p:5").serialize()).isEqualTo("p:5;x:3"); + assertThat(OtelTraceState.parse("p:5;x:3").serialize()).isEqualTo("p:5;x:3"); + assertThat(OtelTraceState.parse("p:5;x:3;").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("a:" + getXString(246) + ";p:5;x:3").serialize()) + .isEqualTo("p:5;a:" + getXString(246) + ";x:3"); + assertThat(OtelTraceState.parse("a:" + getXString(247) + ";p:5;x:3").serialize()).isEqualTo(""); - assertEquals("r:5", OtelTraceState.parse("r:5").serialize()); - assertEquals("r:62", OtelTraceState.parse("r:62").serialize()); - assertEquals("", OtelTraceState.parse("r:63").serialize()); - assertEquals("", OtelTraceState.parse("r:5;").serialize()); - assertEquals("", OtelTraceState.parse("r:99").serialize()); - assertEquals("", OtelTraceState.parse("r:").serialize()); - assertEquals("", OtelTraceState.parse("r:232").serialize()); - assertEquals("", OtelTraceState.parse("x;r:5").serialize()); - assertEquals("", OtelTraceState.parse("r:5;x").serialize()); - assertEquals("r:5;x:3", OtelTraceState.parse("x:3;r:5").serialize()); - assertEquals("r:5;x:3", OtelTraceState.parse("r:5;x:3").serialize()); - assertEquals("", OtelTraceState.parse("r:5;x:3;").serialize()); - 
assertEquals( - "r:5;a:" + getXString(246) + ";x:3", - OtelTraceState.parse("a:" + getXString(246) + ";r:5;x:3").serialize()); - assertEquals("", OtelTraceState.parse("a:" + getXString(247) + ";r:5;x:3").serialize()); + assertThat(OtelTraceState.parse("r:5").serialize()).isEqualTo("r:5"); + assertThat(OtelTraceState.parse("r:62").serialize()).isEqualTo("r:62"); + assertThat(OtelTraceState.parse("r:63").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("r:5;").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("r:99").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("r:").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("r:232").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("x;r:5").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("r:5;x").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("x:3;r:5").serialize()).isEqualTo("r:5;x:3"); + assertThat(OtelTraceState.parse("r:5;x:3").serialize()).isEqualTo("r:5;x:3"); + assertThat(OtelTraceState.parse("r:5;x:3;").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("a:" + getXString(246) + ";r:5;x:3").serialize()) + .isEqualTo("r:5;a:" + getXString(246) + ";x:3"); + assertThat(OtelTraceState.parse("a:" + getXString(247) + ";r:5;x:3").serialize()).isEqualTo(""); - assertEquals("p:7;r:5", OtelTraceState.parse("r:5;p:7").serialize()); - assertEquals("p:4;r:5", OtelTraceState.parse("r:5;p:4").serialize()); - assertEquals("p:7;r:5", OtelTraceState.parse("r:5;p:7").serialize()); - assertEquals("p:4;r:5", OtelTraceState.parse("r:5;p:4").serialize()); + assertThat(OtelTraceState.parse("r:5;p:7").serialize()).isEqualTo("p:7;r:5"); + assertThat(OtelTraceState.parse("r:5;p:4").serialize()).isEqualTo("p:4;r:5"); + assertThat(OtelTraceState.parse("r:5;p:7").serialize()).isEqualTo("p:7;r:5"); + assertThat(OtelTraceState.parse("r:5;p:4").serialize()).isEqualTo("p:4;r:5"); - assertEquals("r:6", OtelTraceState.parse("r:5;r:6").serialize()); 
- assertEquals("p:6;r:10", OtelTraceState.parse("p:5;p:6;r:10").serialize()); - assertEquals("", OtelTraceState.parse("p5;p:6;r:10").serialize()); - assertEquals("p:6;r:10;p5:3", OtelTraceState.parse("p5:3;p:6;r:10").serialize()); - assertEquals("", OtelTraceState.parse(":p:6;r:10").serialize()); - assertEquals("", OtelTraceState.parse(";p:6;r:10").serialize()); - assertEquals("", OtelTraceState.parse("_;p:6;r:10").serialize()); - assertEquals("", OtelTraceState.parse("5;p:6;r:10").serialize()); + assertThat(OtelTraceState.parse("r:5;r:6").serialize()).isEqualTo("r:6"); + assertThat(OtelTraceState.parse("p:5;p:6;r:10").serialize()).isEqualTo("p:6;r:10"); + assertThat(OtelTraceState.parse("p5;p:6;r:10").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("p5:3;p:6;r:10").serialize()).isEqualTo("p:6;r:10;p5:3"); + assertThat(OtelTraceState.parse(":p:6;r:10").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse(";p:6;r:10").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("_;p:6;r:10").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("5;p:6;r:10").serialize()).isEqualTo(""); } } diff --git a/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent/RandomGeneratorTest.java b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent/RandomGeneratorTest.java index f94e7eef4..e2f336727 100644 --- a/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent/RandomGeneratorTest.java +++ b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent/RandomGeneratorTest.java @@ -14,7 +14,7 @@ import org.hipparchus.stat.inference.GTest; import org.junit.jupiter.api.Test; -public class RandomGeneratorTest { +class RandomGeneratorTest { private static void testGenerateRandomBitSet(long seed, int numBits, int numOneBits) { diff --git a/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentAlwaysOffSamplerTest.java 
b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentAlwaysOffSamplerTest.java index 9b5fc050b..d0425aa0b 100644 --- a/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentAlwaysOffSamplerTest.java +++ b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentAlwaysOffSamplerTest.java @@ -10,7 +10,7 @@ import org.junit.jupiter.api.Test; -public class ConsistentAlwaysOffSamplerTest { +class ConsistentAlwaysOffSamplerTest { @Test void testDescription() { diff --git a/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentAlwaysOnSamplerTest.java b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentAlwaysOnSamplerTest.java index 3a6b8531b..115c39c41 100644 --- a/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentAlwaysOnSamplerTest.java +++ b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentAlwaysOnSamplerTest.java @@ -10,7 +10,7 @@ import org.junit.jupiter.api.Test; -public class ConsistentAlwaysOnSamplerTest { +class ConsistentAlwaysOnSamplerTest { @Test void testDescription() { diff --git a/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentFixedThresholdSamplerTest.java b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentFixedThresholdSamplerTest.java index 7eac3ffb1..3d78de81a 100644 --- a/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentFixedThresholdSamplerTest.java +++ b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentFixedThresholdSamplerTest.java @@ -25,7 +25,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -public class ConsistentFixedThresholdSamplerTest { +class ConsistentFixedThresholdSamplerTest { 
private Context parentContext; private String name; @@ -75,7 +75,7 @@ private void testSampling(SplittableRandom rng, double samplingProbability) { } @Test - public void testSampling() { + void testSampling() { // fix seed to get reproducible results SplittableRandom random = new SplittableRandom(0); @@ -92,7 +92,7 @@ public void testSampling() { } @Test - public void testDescription() { + void testDescription() { assertThat(ConsistentSampler.probabilityBased(1.0).getDescription()) .isEqualTo("ConsistentFixedThresholdSampler{threshold=0, sampling probability=1.0}"); assertThat(ConsistentSampler.probabilityBased(0.5).getDescription()) diff --git a/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentRateLimitingSamplerTest.java b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentRateLimitingSamplerTest.java index cc56df1ef..d5cb6b640 100644 --- a/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentRateLimitingSamplerTest.java +++ b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentRateLimitingSamplerTest.java @@ -10,7 +10,11 @@ import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.SpanContext; import io.opentelemetry.api.trace.SpanKind; +import io.opentelemetry.api.trace.TraceFlags; +import io.opentelemetry.api.trace.TraceState; import io.opentelemetry.context.Context; import io.opentelemetry.sdk.trace.data.LinkData; import io.opentelemetry.sdk.trace.samplers.SamplingDecision; @@ -357,6 +361,200 @@ void testProportionalBehavior() { .isCloseTo(targetSpansPerSecondLimit, Percentage.withPercentage(5)); } + @Test + void testUnstableDelegate() { + // Assume there are 10,000 spans/s and the delegate samples 50% of them with probability 100%, + // and unconditionally rejects the rest. 
+ // + // Now, if we do not want to sample more than 1000 spans/s overall, the rate limiting + // sampler should calculate the effective threshold correctly. + + double targetSpansPerSecondLimit = 1000; + double adaptationTimeSeconds = 5; + + Composable delegate = + new CoinFlipSampler(ConsistentSampler.alwaysOff(), ConsistentSampler.alwaysOn()); + + ConsistentSampler sampler = + ConsistentSampler.rateLimited( + delegate, targetSpansPerSecondLimit, adaptationTimeSeconds, nanoTimeSupplier); + + long averageRequestRatePerSecond = 10000; + int numSpans = 1000000; + + List spanSampledNanos = new ArrayList<>(); + + for (int i = 0; i < numSpans; ++i) { + advanceTime(randomInterval(averageRequestRatePerSecond)); + SamplingResult samplingResult = + sampler.shouldSample( + parentContext, + generateRandomTraceId(random), + name, + spanKind, + attributes, + parentLinks); + if (SamplingDecision.RECORD_AND_SAMPLE.equals(samplingResult.getDecision())) { + spanSampledNanos.add(getCurrentTimeNanos()); + } + } + + long timeNow = nanoTime[0]; + long numSampledSpansInLast5Seconds = + spanSampledNanos.stream().filter(x -> x > timeNow - 5000000000L && x <= timeNow).count(); + + assertThat(numSampledSpansInLast5Seconds / 5.) + .isCloseTo(targetSpansPerSecondLimit, Percentage.withPercentage(5)); + } + + @Test + void testLegacyCase() { + // This test makes sure that the issue + // https://github.com/open-telemetry/opentelemetry-java-contrib/issues/2007 + // is resolved. + + long averageRequestRatePerSecond = 10000; + + // Assume the following setup: + // The root span is sampled by the legacy sampler AlwaysOn. + // One of its descendant spans, which we will call "parent" span, is sampled with + // stage1: ConsistentRateLimitingSampler(ConsistentParentBasedSampler, 5000/s). + // This will sample approximately 50% of the spans. + + // Its "child" is similarly sampled by + // stage2: ConsistentRateLimitingSampler(ConsistentParentBasedSampler, 2500/s). 
+ + // This sampler will generate the same output as the root span described above: + // - the threshold will be 0, so all spans will be sampled + // - isAdjustedCountReliable will be false + // - there will be no threshold in TraceState, but the sampling flag will be set + Composable mockRootSampler = new LegacyLikeComposable(ConsistentSampler.alwaysOn()); + + double targetSpansPerSecondLimit = 2500; // for stage2 + double adaptationTimeSeconds = 5; + + // The sampler for "parent" spans + ConsistentSampler stage1 = + ConsistentSampler.rateLimited( + mockRootSampler, + 2 * targetSpansPerSecondLimit, + adaptationTimeSeconds, + nanoTimeSupplier); + + // The sampler for "child" spans (it will never see root spans) + ConsistentSampler stage2 = + ConsistentSampler.rateLimited( + ConsistentSampler.parentBased(ConsistentSampler.alwaysOff()), + targetSpansPerSecondLimit, + adaptationTimeSeconds, + nanoTimeSupplier); + + int numSpans = 1000000; + int stage1SampledCount = 0; + int stage2SampledCount = 0; + + for (int i = 0; i < numSpans; ++i) { + advanceTime(randomInterval(averageRequestRatePerSecond)); + String traceId = generateRandomTraceId(random); + + // Stage 1 sampling, the "parent" + SamplingResult samplingResult1 = + stage1.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks); + + boolean isSampled = SamplingDecision.RECORD_AND_SAMPLE.equals(samplingResult1.getDecision()); + if (isSampled) { + stage1SampledCount++; + } + + // Prepare the context for the child span, pass parent's TraceState to the child + Span parentSpan = Span.fromContext(parentContext); + SpanContext parentSpanContext = parentSpan.getSpanContext(); + TraceState parentSamplingTraceState = + samplingResult1.getUpdatedTraceState(parentSpanContext.getTraceState()); + + SpanContext childSpanContext = + SpanContext.create( + traceId, + "1000badbadbad000", + isSampled ? 
TraceFlags.getSampled() : TraceFlags.getDefault(), + parentSamplingTraceState); + Span childSpan = Span.wrap(childSpanContext); + Context childContext = childSpan.storeInContext(parentContext); + + // Stage 2 sampling, the "child" + SamplingResult samplingResult2 = + stage2.shouldSample(childContext, traceId, name, spanKind, attributes, parentLinks); + + if (SamplingDecision.RECORD_AND_SAMPLE.equals(samplingResult2.getDecision())) { + stage2SampledCount++; + } + } + + long timeNow = nanoTime[0]; + double duration = timeNow / 1000000000.0; // in seconds + assertThat(duration) + .isCloseTo(numSpans / (double) averageRequestRatePerSecond, Percentage.withPercentage(2)); + + assertThat(stage1SampledCount / duration) + .isCloseTo(2 * targetSpansPerSecondLimit, Percentage.withPercentage(2)); + + assertThat(stage2SampledCount / duration) + .isCloseTo(targetSpansPerSecondLimit, Percentage.withPercentage(2)); + } + + /* + * An auxiliary class used to simulate the behavior of a legacy (non consistent-probability) + * sampler, just for testing mixed environment + */ + static class LegacyLikeComposable implements Composable { + + private final Composable delegate; + + public LegacyLikeComposable(Composable delegate) { + this.delegate = delegate; + } + + @Override + public SamplingIntent getSamplingIntent( + Context parentContext, + String name, + SpanKind spanKind, + Attributes attributes, + List parentLinks) { + + SamplingIntent delegateIntent = + delegate.getSamplingIntent(parentContext, name, spanKind, attributes, parentLinks); + + return new SamplingIntent() { + @Override + public long getThreshold() { + return delegateIntent.getThreshold(); + } + + @Override + public boolean isAdjustedCountReliable() { + // Forcing "legacy" behavior, no threshold will be put into TraceState + return false; + } + + @Override + public Attributes getAttributes() { + return delegateIntent.getAttributes(); + } + + @Override + public TraceState updateTraceState(TraceState previousState) { + 
return delegateIntent.updateTraceState(previousState); + } + }; + } + + @Override + public String getDescription() { + return "LegacyLike(" + delegate.getDescription() + ")"; + } + } + @Test void testDescription() { diff --git a/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentSamplerTest.java b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentSamplerTest.java index a246e248f..7725bb57a 100644 --- a/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentSamplerTest.java +++ b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentSamplerTest.java @@ -37,42 +37,42 @@ private static class Input { private OptionalLong parentThreshold = OptionalLong.empty(); private OptionalLong parentRandomValue = OptionalLong.empty(); - public void setParentSampled(boolean parentSampled) { + void setParentSampled(boolean parentSampled) { this.parentSampled = parentSampled; } - public void setParentThreshold(long parentThreshold) { + void setParentThreshold(long parentThreshold) { assertThat(parentThreshold).isBetween(0L, 0xffffffffffffffL); this.parentThreshold = OptionalLong.of(parentThreshold); } - public void setParentRandomValue(long parentRandomValue) { + void setParentRandomValue(long parentRandomValue) { assertThat(parentRandomValue).isBetween(0L, 0xffffffffffffffL); this.parentRandomValue = OptionalLong.of(parentRandomValue); } - public Context getParentContext() { + Context getParentContext() { return createParentContext( traceId, spanId, parentThreshold, parentRandomValue, parentSampled); } - public static String getTraceId() { + static String getTraceId() { return traceId; } - public static String getName() { + static String getName() { return name; } - public static SpanKind getSpanKind() { + static SpanKind getSpanKind() { return spanKind; } - public static Attributes getAttributes() { + static Attributes getAttributes() 
{ return attributes; } - public static List getParentLinks() { + static List getParentLinks() { return parentLinks; } } @@ -87,10 +87,6 @@ private static class Output { this.parentContext = parentContext; } - boolean getSampledFlag() { - return SamplingDecision.RECORD_AND_SAMPLE.equals(samplingResult.getDecision()); - } - OptionalLong getThreshold() { Span parentSpan = Span.fromContext(parentContext); OtelTraceState otelTraceState = @@ -163,7 +159,6 @@ void testMinThresholdWithoutParentRandomValue() { assertThat(output.samplingResult.getDecision()).isEqualTo(SamplingDecision.RECORD_AND_SAMPLE); assertThat(output.getThreshold()).hasValue(0); assertThat(output.getRandomValue()).isNotPresent(); - assertThat(output.getSampledFlag()).isTrue(); } @Test @@ -181,7 +176,6 @@ void testMinThresholdWithParentRandomValue() { assertThat(output.samplingResult.getDecision()).isEqualTo(SamplingDecision.RECORD_AND_SAMPLE); assertThat(output.getThreshold()).hasValue(0); assertThat(output.getRandomValue()).hasValue(parentRandomValue); - assertThat(output.getSampledFlag()).isTrue(); } @Test @@ -194,9 +188,8 @@ void testMaxThreshold() { Output output = sample(input, sampler); assertThat(output.samplingResult.getDecision()).isEqualTo(SamplingDecision.DROP); - assertThat(output.getThreshold()).isEmpty(); + assertThat(output.getThreshold()).isNotPresent(); assertThat(output.getRandomValue()).isNotPresent(); - assertThat(output.getSampledFlag()).isFalse(); } @Test @@ -216,7 +209,6 @@ void testParentBasedInConsistentMode() { assertThat(output.samplingResult.getDecision()).isEqualTo(SamplingDecision.RECORD_AND_SAMPLE); assertThat(output.getThreshold()).hasValue(parentRandomValue); assertThat(output.getRandomValue()).hasValue(parentRandomValue); - assertThat(output.getSampledFlag()).isTrue(); } @Test @@ -232,7 +224,6 @@ void testParentBasedInLegacyMode() { assertThat(output.samplingResult.getDecision()).isEqualTo(SamplingDecision.RECORD_AND_SAMPLE); 
assertThat(output.getThreshold()).isNotPresent(); assertThat(output.getRandomValue()).isNotPresent(); - assertThat(output.getSampledFlag()).isTrue(); } @Test @@ -248,7 +239,6 @@ void testHalfThresholdNotSampled() { assertThat(output.samplingResult.getDecision()).isEqualTo(SamplingDecision.DROP); assertThat(output.getThreshold()).isNotPresent(); assertThat(output.getRandomValue()).hasValue(0x7FFFFFFFFFFFFFL); - assertThat(output.getSampledFlag()).isFalse(); } @Test @@ -264,7 +254,6 @@ void testHalfThresholdSampled() { assertThat(output.samplingResult.getDecision()).isEqualTo(SamplingDecision.RECORD_AND_SAMPLE); assertThat(output.getThreshold()).hasValue(0x80000000000000L); assertThat(output.getRandomValue()).hasValue(0x80000000000000L); - assertThat(output.getSampledFlag()).isTrue(); } @Test @@ -279,9 +268,7 @@ void testParentViolatingInvariant() { Output output = sample(input, sampler); assertThat(output.samplingResult.getDecision()).isEqualTo(SamplingDecision.RECORD_AND_SAMPLE); - assertThat(output.getThreshold()).hasValue(0x0L); assertThat(output.getRandomValue()).hasValue(0x80000000000000L); - assertThat(output.getSampledFlag()).isTrue(); } } diff --git a/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentSamplingUtilTest.java b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentSamplingUtilTest.java index fcf2dcd8d..d612f9e0a 100644 --- a/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentSamplingUtilTest.java +++ b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentSamplingUtilTest.java @@ -19,7 +19,7 @@ import org.junit.jupiter.api.Test; -public class ConsistentSamplingUtilTest { +class ConsistentSamplingUtilTest { @Test void testCalculateSamplingProbability() { diff --git a/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentVariableThresholdSamplerTest.java 
b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentVariableThresholdSamplerTest.java new file mode 100644 index 000000000..90428fad9 --- /dev/null +++ b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/ConsistentVariableThresholdSamplerTest.java @@ -0,0 +1,49 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.sampler.consistent56; + +import static io.opentelemetry.contrib.sampler.consistent56.ConsistentSamplingUtil.calculateThreshold; +import static io.opentelemetry.contrib.sampler.consistent56.ConsistentSamplingUtil.getMaxThreshold; +import static org.assertj.core.api.Assertions.assertThat; + +import org.junit.jupiter.api.Test; + +class ConsistentVariableThresholdSamplerTest { + + @Test + void testSetSamplingProbability() { + double probability = 0.5; + ConsistentVariableThresholdSampler sampler = + new ConsistentVariableThresholdSampler(probability); + testSetSamplingProbability(probability, sampler, /* updateProbability= */ false); + testSetSamplingProbability(0.25, sampler, /* updateProbability= */ true); + testSetSamplingProbability(0.0, sampler, /* updateProbability= */ true); + testSetSamplingProbability(1.0, sampler, /* updateProbability= */ true); + } + + private static void testSetSamplingProbability( + double probability, ConsistentVariableThresholdSampler sampler, boolean updateProbability) { + long threshold = calculateThreshold(probability); + String thresholdString = + ConsistentSamplingUtil.appendLast56BitHexEncodedWithoutTrailingZeros( + new StringBuilder(), threshold) + .toString(); + if (threshold == getMaxThreshold()) { + thresholdString = "max"; + } + if (updateProbability) { + sampler.setSamplingProbability(probability); + } + assertThat(sampler.getThreshold()).isEqualTo(threshold); + assertThat(sampler.getDescription()) + .isEqualTo( + "ConsistentVariableThresholdSampler{threshold=" + + 
thresholdString + + ", sampling probability=" + + probability + + "}"); + } +} diff --git a/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/OtelTraceStateTest.java b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/OtelTraceStateTest.java index a131e9b78..8e8b3ef96 100644 --- a/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/OtelTraceStateTest.java +++ b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/OtelTraceStateTest.java @@ -5,71 +5,79 @@ package io.opentelemetry.contrib.sampler.consistent56; -import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; import java.util.stream.Collectors; import java.util.stream.Stream; import org.junit.jupiter.api.Test; -public class OtelTraceStateTest { +class OtelTraceStateTest { private static String getXString(int len) { return Stream.generate(() -> "X").limit(len).collect(Collectors.joining()); } @Test - public void test() { + void test() { - assertEquals("", OtelTraceState.parse("").serialize()); - assertEquals("", OtelTraceState.parse("").serialize()); + assertThat(OtelTraceState.parse("").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("").serialize()).isEqualTo(""); - assertEquals("", OtelTraceState.parse("a").serialize()); - assertEquals("", OtelTraceState.parse("#").serialize()); - assertEquals("", OtelTraceState.parse(" ").serialize()); + assertThat(OtelTraceState.parse("a").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("#").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse(" ").serialize()).isEqualTo(""); - assertEquals("rv:1234567890abcd", OtelTraceState.parse("rv:1234567890abcd").serialize()); - assertEquals("rv:01020304050607", OtelTraceState.parse("rv:01020304050607").serialize()); - assertEquals("", OtelTraceState.parse("rv:1234567890abcde").serialize()); + 
assertThat(OtelTraceState.parse("rv:1234567890abcd").serialize()) + .isEqualTo("rv:1234567890abcd"); + assertThat(OtelTraceState.parse("rv:01020304050607").serialize()) + .isEqualTo("rv:01020304050607"); + assertThat(OtelTraceState.parse("rv:1234567890abcde").serialize()).isEqualTo(""); - assertEquals("th:1234567890abcd", OtelTraceState.parse("th:1234567890abcd").serialize()); - assertEquals("th:01020304050607", OtelTraceState.parse("th:01020304050607").serialize()); - assertEquals("th:1", OtelTraceState.parse("th:10000000000000").serialize()); - assertEquals("th:12345", OtelTraceState.parse("th:1234500000000").serialize()); - assertEquals("th:0", OtelTraceState.parse("th:0").serialize()); // TODO - assertEquals("", OtelTraceState.parse("th:100000000000000").serialize()); - assertEquals("", OtelTraceState.parse("th:1234567890abcde").serialize()); + assertThat(OtelTraceState.parse("th:1234567890abcd").serialize()) + .isEqualTo("th:1234567890abcd"); + assertThat(OtelTraceState.parse("th:01020304050607").serialize()) + .isEqualTo("th:01020304050607"); + assertThat(OtelTraceState.parse("th:10000000000000").serialize()).isEqualTo("th:1"); + assertThat(OtelTraceState.parse("th:1234500000000").serialize()).isEqualTo("th:12345"); + assertThat(OtelTraceState.parse("th:0").serialize()).isEqualTo("th:0"); // TODO + assertThat(OtelTraceState.parse("th:100000000000000").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("th:1234567890abcde").serialize()).isEqualTo(""); - assertEquals( - "th:1234567890abcd;rv:1234567890abcd;a:" + getXString(214) + ";x:3", - OtelTraceState.parse("a:" + getXString(214) + ";rv:1234567890abcd;th:1234567890abcd;x:3") - .serialize()); - assertEquals( - "", - OtelTraceState.parse("a:" + getXString(215) + ";rv:1234567890abcd;th:1234567890abcd;x:3") - .serialize()); + assertThat( + OtelTraceState.parse( + "a:" + getXString(214) + ";rv:1234567890abcd;th:1234567890abcd;x:3") + .serialize()) + .isEqualTo("th:1234567890abcd;rv:1234567890abcd;a:" + 
getXString(214) + ";x:3"); + assertThat( + OtelTraceState.parse( + "a:" + getXString(215) + ";rv:1234567890abcd;th:1234567890abcd;x:3") + .serialize()) + .isEqualTo(""); - assertEquals("", OtelTraceState.parse("th:x").serialize()); - assertEquals("", OtelTraceState.parse("th:100000000000000").serialize()); - assertEquals("th:1", OtelTraceState.parse("th:10000000000000").serialize()); - assertEquals("th:1", OtelTraceState.parse("th:1000000000000").serialize()); - assertEquals("th:1", OtelTraceState.parse("th:100000000000").serialize()); - assertEquals("th:1", OtelTraceState.parse("th:10000000000").serialize()); - assertEquals("th:1", OtelTraceState.parse("th:1000000000").serialize()); - assertEquals("th:1", OtelTraceState.parse("th:100000000").serialize()); - assertEquals("th:1", OtelTraceState.parse("th:10000000").serialize()); - assertEquals("th:1", OtelTraceState.parse("th:1000000").serialize()); - assertEquals("th:1", OtelTraceState.parse("th:100000").serialize()); - assertEquals("th:1", OtelTraceState.parse("th:10000").serialize()); - assertEquals("th:1", OtelTraceState.parse("th:1000").serialize()); - assertEquals("th:1", OtelTraceState.parse("th:100").serialize()); - assertEquals("th:1", OtelTraceState.parse("th:10").serialize()); - assertEquals("th:1", OtelTraceState.parse("th:1").serialize()); + assertThat(OtelTraceState.parse("th:x").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("th:100000000000000").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("th:10000000000000").serialize()).isEqualTo("th:1"); + assertThat(OtelTraceState.parse("th:1000000000000").serialize()).isEqualTo("th:1"); + assertThat(OtelTraceState.parse("th:100000000000").serialize()).isEqualTo("th:1"); + assertThat(OtelTraceState.parse("th:10000000000").serialize()).isEqualTo("th:1"); + assertThat(OtelTraceState.parse("th:1000000000").serialize()).isEqualTo("th:1"); + assertThat(OtelTraceState.parse("th:100000000").serialize()).isEqualTo("th:1"); + 
assertThat(OtelTraceState.parse("th:10000000").serialize()).isEqualTo("th:1"); + assertThat(OtelTraceState.parse("th:1000000").serialize()).isEqualTo("th:1"); + assertThat(OtelTraceState.parse("th:100000").serialize()).isEqualTo("th:1"); + assertThat(OtelTraceState.parse("th:10000").serialize()).isEqualTo("th:1"); + assertThat(OtelTraceState.parse("th:1000").serialize()).isEqualTo("th:1"); + assertThat(OtelTraceState.parse("th:100").serialize()).isEqualTo("th:1"); + assertThat(OtelTraceState.parse("th:10").serialize()).isEqualTo("th:1"); + assertThat(OtelTraceState.parse("th:1").serialize()).isEqualTo("th:1"); - assertEquals("th:10000000000001", OtelTraceState.parse("th:10000000000001").serialize()); - assertEquals("th:1000000000001", OtelTraceState.parse("th:10000000000010").serialize()); - assertEquals("", OtelTraceState.parse("rv:x").serialize()); - assertEquals("", OtelTraceState.parse("rv:100000000000000").serialize()); - assertEquals("rv:10000000000000", OtelTraceState.parse("rv:10000000000000").serialize()); - assertEquals("", OtelTraceState.parse("rv:1000000000000").serialize()); + assertThat(OtelTraceState.parse("th:10000000000001").serialize()) + .isEqualTo("th:10000000000001"); + assertThat(OtelTraceState.parse("th:10000000000010").serialize()).isEqualTo("th:1000000000001"); + assertThat(OtelTraceState.parse("rv:x").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("rv:100000000000000").serialize()).isEqualTo(""); + assertThat(OtelTraceState.parse("rv:10000000000000").serialize()) + .isEqualTo("rv:10000000000000"); + assertThat(OtelTraceState.parse("rv:1000000000000").serialize()).isEqualTo(""); } } diff --git a/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/RandomValueGeneratorsTest.java b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/RandomValueGeneratorsTest.java index ab7d378b6..d9a34255f 100644 --- 
a/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/RandomValueGeneratorsTest.java +++ b/consistent-sampling/src/test/java/io/opentelemetry/contrib/sampler/consistent56/RandomValueGeneratorsTest.java @@ -10,7 +10,7 @@ import org.junit.jupiter.api.Test; -public class RandomValueGeneratorsTest { +class RandomValueGeneratorsTest { @Test void testRandomRange() { int attempts = 10000; diff --git a/dependencyManagement/build.gradle.kts b/dependencyManagement/build.gradle.kts index 5739a81f9..2221dfd27 100644 --- a/dependencyManagement/build.gradle.kts +++ b/dependencyManagement/build.gradle.kts @@ -2,8 +2,8 @@ plugins { `java-platform` } -val otelInstrumentationVersion = "2.15.0-alpha" -val semconvVersion = "1.32.0" +val otelInstrumentationVersion = "2.20.1-alpha" +val semconvVersion = "1.37.0" javaPlatform { allowDependencies() @@ -14,21 +14,23 @@ dependencies { // under JvmTestSuite so they don't show up as runtime dependencies in license and vulnerability scans // (the constraints section below doesn't have this issue, and will only show up // as runtime dependencies if they are actually used as runtime dependencies) - api(enforcedPlatform("io.opentelemetry.instrumentation:opentelemetry-instrumentation-bom-alpha:${otelInstrumentationVersion}")) - api(enforcedPlatform("com.fasterxml.jackson:jackson-bom:2.18.3")) + api(platform("io.opentelemetry.instrumentation:opentelemetry-instrumentation-bom-alpha:${otelInstrumentationVersion}")) + api(platform("com.fasterxml.jackson:jackson-bom:2.20.0")) + api(platform("com.google.protobuf:protobuf-bom:4.32.1")) + api(platform("com.squareup.okhttp3:okhttp-bom:5.1.0")) constraints { api("io.opentelemetry.semconv:opentelemetry-semconv:${semconvVersion}") - api("io.opentelemetry.semconv:opentelemetry-semconv-incubating:${semconvVersion}") + api("io.opentelemetry.semconv:opentelemetry-semconv-incubating:${semconvVersion}-alpha") api("com.google.auto.service:auto-service:1.1.1") 
api("com.google.auto.service:auto-service-annotations:1.1.1") api("com.google.auto.value:auto-value:1.11.0") api("com.google.auto.value:auto-value-annotations:1.11.0") - api("com.google.errorprone:error_prone_annotations:2.37.0") - api("com.google.errorprone:error_prone_core:2.37.0") + api("com.google.errorprone:error_prone_annotations:2.42.0") + api("com.google.errorprone:error_prone_core:2.42.0") api("io.github.netmikey.logunit:logunit-jul:2.0.0") - api("io.opentelemetry.proto:opentelemetry-proto:1.5.0-alpha") + api("io.opentelemetry.proto:opentelemetry-proto:1.8.0-alpha") api("io.prometheus:simpleclient:0.16.0") api("io.prometheus:simpleclient_common:0.16.0") api("io.prometheus:simpleclient_httpserver:0.16.0") @@ -43,19 +45,19 @@ dependencies { api("com.google.code.findbugs:annotations:3.0.1u2") api("com.google.code.findbugs:jsr305:3.0.2") - api("com.squareup.okhttp3:okhttp:4.12.0") - api("com.uber.nullaway:nullaway:0.12.6") - api("org.assertj:assertj-core:3.27.3") + api("com.uber.nullaway:nullaway:0.12.10") + api("org.assertj:assertj-core:3.27.6") api("org.awaitility:awaitility:4.3.0") api("org.bouncycastle:bcpkix-jdk15on:1.70") api("org.junit-pioneer:junit-pioneer:1.9.1") api("org.skyscreamer:jsonassert:1.5.3") - api("org.apache.kafka:kafka-clients:3.9.0") - api("org.testcontainers:kafka:1.20.6") + api("org.apache.kafka:kafka-clients:4.1.0") + api("org.testcontainers:kafka:1.21.3") api("com.lmax:disruptor:3.4.4") api("org.jctools:jctools-core:4.0.5") - api("tools.profiler:async-profiler:3.0") + api("tools.profiler:async-profiler:4.1") api("com.blogspot.mydailyjava:weak-lock-free:0.18") api("org.agrona:agrona:1.22.0") + api("com.github.f4b6a3:uuid-creator:6.1.1") } } diff --git a/disk-buffering/DESIGN.md b/disk-buffering/DESIGN.md index 01f6048da..3bd1e3f01 100644 --- a/disk-buffering/DESIGN.md +++ b/disk-buffering/DESIGN.md @@ -1,59 +1,62 @@ # Design Overview -There are three main disk-writing exporters provided by this module: +The core of disk buffering +is 
[SignalStorage](src/main/java/io/opentelemetry/contrib/disk/buffering/storage/SignalStorage.java). +SignalStorage is an abstraction that defines the bare minimum functionalities needed for +implementations to allow writing and reading signals. -* [LogRecordToDiskExporter](src/main/java/io/opentelemetry/contrib/disk/buffering/LogRecordToDiskExporter.java) -* [MetricToDiskExporter](src/main/java/io/opentelemetry/contrib/disk/buffering/MetricToDiskExporter.java) -* [SpanToDiskExporter](src/main/java/io/opentelemetry/contrib/disk/buffering/SpanToDiskExporter.java)) +There is a default implementation per signal that writes serialized signal items to protobuf +delimited messages into files, where each file's name represents a timestamp of when it was created, +which will help later to know when it's ready to read, as well as when it's expired. These +implementations are the following: -Each is responsible for writing a specific type of telemetry to disk storage for later -harvest/ingest. +* [FileSpanStorage](src/main/java/io/opentelemetry/contrib/disk/buffering/storage/impl/FileSpanStorage.java) +* [FileLogRecordStorage](src/main/java/io/opentelemetry/contrib/disk/buffering/storage/impl/FileLogRecordStorage.java) +* [FileMetricStorage](src/main/java/io/opentelemetry/contrib/disk/buffering/storage/impl/FileMetricStorage.java) -For later reading, there are: +Each one has a `create()` method that takes a destination directory (to store data into) and an +optional [FileStorageConfiguration](src/main/java/io/opentelemetry/contrib/disk/buffering/storage/impl/FileStorageConfiguration.java) +to have finer control of the storing behavior. 
-* [LogRecordFromToDiskExporter](src/main/java/io/opentelemetry/contrib/disk/buffering/LogRecordFromDiskExporter.java) -* [MetricFromDiskExporter](src/main/java/io/opentelemetry/contrib/disk/buffering/MetricFromDiskExporter.java) -* [SpanFromDiskExporter](src/main/java/io/opentelemetry/contrib/disk/buffering/SpanFromDiskExporter.java)) +Even +though [SignalStorage](src/main/java/io/opentelemetry/contrib/disk/buffering/storage/SignalStorage.java) +can receive signal items directly to be stored in disk, there are convenience exporter +implementations for each signal that handle the storing process on your behalf. Those are the +following: -Each one of those has a `create()` method that takes a delegate exporter (to send data -to ingest) and the `StorageConfiguration` that tells them where to find buffered data. +* [SpanToDiskExporter](src/main/java/io/opentelemetry/contrib/disk/buffering/exporters/SpanToDiskExporter.java) +* [LogRecordToDiskExporter](src/main/java/io/opentelemetry/contrib/disk/buffering/exporters/LogRecordToDiskExporter.java) +* [MetricToDiskExporter](src/main/java/io/opentelemetry/contrib/disk/buffering/exporters/MetricToDiskExporter.java) -As explained in the [README](README.md), this has to be triggered manually by the consumer of -this library and does not happen automatically. +Each receives their +respective [SignalStorage](src/main/java/io/opentelemetry/contrib/disk/buffering/storage/SignalStorage.java) +object to delegate signals to as well as an optional callback object to notify its operations. ## Writing overview ![Writing flow](assets/writing-flow.png) -* The writing process happens automatically within its `export(Collection signals)` - method, which is called by the configured signal processor. 
-* When a set of signals is received, these are delegated over to - a type-specific wrapper of [ToDiskExporter](src/main/java/io/opentelemetry/contrib/disk/buffering/internal/exporter/ToDiskExporter.java) - class which then serializes them using an implementation - of [SignalSerializer](src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/serializers/SignalSerializer.java) - and then the serialized data is appended into a File using an instance of - the [Storage](src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/Storage.java) - class. +* Via the convenience toDisk exporters, the writing process happens automatically within their + `export(Collection signals)` method, which is called by the configured signal + processor. +* When a set of signals is received, these are delegated over to a type-specific serializer + and then the serialized data is appended into a file. * The data is written into a file directly, without the use of a buffer, to make sure no data gets lost in case the application ends unexpectedly. -* Each disk exporter stores its signals in its own folder, which is expected to contain files +* Each signal storage stores its signals in its own folder, which is expected to contain files that belong to that type of signal only. * Each file may contain more than a batch of signals if the configuration parameters allow enough limit size for it. * If the configured folder size for the signals has been reached and a new file is needed to be created to keep storing new data, the oldest available file will be removed to make space for the new one. 
-* The [Storage](src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/Storage.java), - [FolderManager](src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/FolderManager.java) - and [WritableFile](src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/WritableFile.java) - files contain more information on the details of the writing process into a file. ## Reading overview ![Reading flow](assets/reading-flow.png) -* The reading process has to be triggered manually by the library consumer as explained in - the [README](README.md). +* The reading process has to be triggered manually by the library consumer via the signal storage + iterator. * A single file is read at a time and updated to remove the data gathered from it after it is successfully exported, until it's emptied. Each file previously created during the writing process has a timestamp in milliseconds, which is used to determine what file to start @@ -62,9 +65,3 @@ this library and does not happen automatically. the time of creating the disk exporter, then it will be ignored, and the next oldest (and unexpired) one will be used instead. * All the stale and empty files will be removed as a new file is created. -* The [Storage](src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/Storage.java), - [FolderManager](src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/FolderManager.java) - and [ReadableFile](src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/ReadableFile.java) - files contain more information on the details of the file reading process. -* Note that the reader delegates the data to the exporter exactly in the way it has received the - data - it does not try to batch data (but this could be an optimization in the future). 
diff --git a/disk-buffering/README.md b/disk-buffering/README.md index 67dbb1f52..9178794ad 100644 --- a/disk-buffering/README.md +++ b/disk-buffering/README.md @@ -1,115 +1,132 @@ # Disk buffering -This module provides exporters that store telemetry data in files which can be -sent later on demand. A high level description of how it works is that there are two separate -processes in place, one for writing data in disk, and one for reading/exporting the previously -stored data. +This module provides an abstraction +named [SignalStorage](src/main/java/io/opentelemetry/contrib/disk/buffering/storage/SignalStorage.java), +as well as default implementations for each signal type that allow writing signals to disk and +reading them later. -* Each exporter stores the received data automatically in disk right after it's received from its - processor. -* The reading of the data back from disk and exporting process has to be done manually. At - the moment there's no automatic mechanism to do so. There's more information on how it can be - achieved, under [Reading data](#reading-data). +For a more detailed information on how the whole process works, take a look at +the [DESIGN.md](DESIGN.md) file. -> For a more detailed information on how the whole process works, take a look at -> the [DESIGN.md](DESIGN.md) file. +## Default implementation usage -## Configuration +The default implementations are the following: -The configurable parameters are provided **per exporter**, the available ones are: +* [FileSpanStorage](src/main/java/io/opentelemetry/contrib/disk/buffering/storage/impl/FileSpanStorage.java) +* [FileLogRecordStorage](src/main/java/io/opentelemetry/contrib/disk/buffering/storage/impl/FileLogRecordStorage.java) +* [FileMetricStorage](src/main/java/io/opentelemetry/contrib/disk/buffering/storage/impl/FileMetricStorage.java) -* Max file size, defaults to 1MB. -* Max folder size, defaults to 10MB. 
All files are stored in a single folder per-signal, therefore - if all 3 types of signals are stored, the total amount of space from disk to be taken by default - would be of 30MB. -* Max age for file writing, defaults to 30 seconds. -* Min age for file reading, defaults to 33 seconds. It must be greater that the max age for file - writing. -* Max age for file reading, defaults to 18 hours. After that time passes, the file will be - considered stale and will be removed when new files are created. No more data will be read from a - file past this time. -* An instance - of [TemporaryFileProvider](src/main/java/io/opentelemetry/contrib/disk/buffering/config/TemporaryFileProvider.java), - defaults to calling `File.createTempFile`. This provider will be used when reading from the disk - in order create a temporary file from which each line (batch of signals) will be read and - sequentially get removed from the original cache file right after the data has been successfully - exported. - -## Usage +### Set up -### Storing data +We need to create a signal storage object per signal type to start writing signals to disk. Each +`File*Storage` implementation has a `create()` function that receives: + +* A File directory to store the signal files. Note that each signal storage object must have a + dedicated directory to work properly. +* (Optional) a configuration object. -In order to use it, you need to wrap your own exporter with a new instance of -the ones provided in here: +The available configuration parameters are the following: -* For a LogRecordExporter, it must be wrapped within - a [LogRecordToDiskExporter](src/main/java/io/opentelemetry/contrib/disk/buffering/LogRecordToDiskExporter.java). -* For a MetricExporter, it must be wrapped within - a [MetricToDiskExporter](src/main/java/io/opentelemetry/contrib/disk/buffering/MetricToDiskExporter.java). 
-* For a SpanExporter, it must be wrapped within - a [SpanToDiskExporter](src/main/java/io/opentelemetry/contrib/disk/buffering/SpanToDiskExporter.java). +* Max file size, defaults to 1MB. +* Max folder size, defaults to 10MB. +* Max age for file writing. It sets the time window where a file can get signals appended to it. + Defaults to 30 seconds. +* Min age for file reading. It sets the time to wait before starting to read from a file after + its creation. Defaults to 33 seconds. It must be greater than the max age for file writing. +* Max age for file reading. After that time passes, the file will be considered stale and will be + removed when new files are created. No more data will be read from a file past this time. Defaults + to 18 hours. -Each wrapper will need the following when instantiating them: ```java // Root dir +File rootDir = new File("/some/root"); -* The exporter to be wrapped. -* A File instance of the root directory where all the data is going to be written. The same root dir - can be used for all the wrappers, since each will create their own folder inside it. -* An instance - of [StorageConfiguration](src/main/java/io/opentelemetry/contrib/disk/buffering/config/StorageConfiguration.java) - with the desired parameters. You can create one with default values by - calling `StorageConfiguration.getDefault()`. +// Setting up span storage +SignalStorage.Span spanStorage = FileSpanStorage.create(new File(rootDir, "spans")); -After wrapping your exporters, you must register the wrapper as the exporter you'll use. It will -take care of always storing the data it receives. 
+// Setting up metric storage +SignalStorage.Metric metricStorage = FileMetricStorage.create(new File(rootDir, "metrics")); -#### Set up example for spans +// Setting up log storage +SignalStorage.LogRecord logStorage = FileLogRecordStorage.create(new File(rootDir, "logs")); +``` -### Writing data +### Storing data -The data is written in the disk by "ToDisk" exporters, these are exporters that serialize and store the data as received by their processors. If for some reason -the "ToDisk" cannot store data in the disk, they'll delegate the data to their wrapped exporter. +While you could manually call your `SignalStorage.write(items)` function, disk buffering +provides convenience exporters that you can use in your OpenTelemetry's instance, so +that all signals are automatically stored as they are created. -```java -// Creating the SpanExporter of our choice. -SpanExporter mySpanExporter = OtlpGrpcSpanExporter.getDefault(); +* For a span storage, use + a [SpanToDiskExporter](src/main/java/io/opentelemetry/contrib/disk/buffering/exporters/SpanToDiskExporter.java). +* For a log storage, use + a [LogRecordToDiskExporter](src/main/java/io/opentelemetry/contrib/disk/buffering/exporters/LogRecordToDiskExporter.java). +* For a metric storage, use + a [MetricToDiskExporter](src/main/java/io/opentelemetry/contrib/disk/buffering/exporters/MetricToDiskExporter.java). -// Wrapping our exporter with its "ToDisk" exporter. -SpanToDiskExporter toDiskExporter = SpanToDiskExporter.create(mySpanExporter, StorageConfiguration.getDefault(new File("/my/signals/cache/dir"))); +Each will wrap a signal storage for its respective signal type, as well as an optional callback +to notify when it succeeds, fails, and gets shutdown. - // Registering the disk exporter within our OpenTelemetry instance. 
-SdkTracerProvider myTraceProvider = SdkTracerProvider.builder() - .addSpanProcessor(SimpleSpanProcessor.create(toDiskExporter)) +```java +// Setting up span to disk exporter +SpanToDiskExporter spanToDiskExporter = + SpanToDiskExporter.builder(spanStorage).setExporterCallback(spanCallback).build(); +// Setting up metric to disk +MetricToDiskExporter metricToDiskExporter = + MetricToDiskExporter.builder(metricStorage).setExporterCallback(metricCallback).build(); +// Setting up log to disk exporter +LogRecordToDiskExporter logToDiskExporter = + LogRecordToDiskExporter.builder(logStorage).setExporterCallback(logCallback).build(); + +// Using exporters in your OpenTelemetry instance. +OpenTelemetry openTelemetry = + OpenTelemetrySdk.builder() + // Using span to disk exporter + .setTracerProvider( + SdkTracerProvider.builder() + .addSpanProcessor(BatchSpanProcessor.builder(spanToDiskExporter).build()) + .build()) + // Using log to disk exporter + .setLoggerProvider( + SdkLoggerProvider.builder() + .addLogRecordProcessor( + BatchLogRecordProcessor.builder(logToDiskExporter).build()) + .build()) + // Using metric to disk exporter + .setMeterProvider( + SdkMeterProvider.builder() + .registerMetricReader(PeriodicMetricReader.create(metricToDiskExporter)) + .build()) .build(); -OpenTelemetrySdk.builder() - .setTracerProvider(myTraceProvider) - .buildAndRegisterGlobal(); - ``` +Now when creating signals using your `OpenTelemetry` instance, those will get stored in disk. + ### Reading data -In order to read data, we need to create "FromDisk" exporters, which read data from the disk, parse it and delegate it -to their wrapped exporters. +In order to read data, we can iterate through our signal storage objects and then forward them to +a network exporter, as shown in the example for spans below. 
```java -try { - SpanFromDiskExporter fromDiskExporter = SpanFromDiskExporter.create(memorySpanExporter, storageConfig); - if(fromDiskExporter.exportStoredBatch(1, TimeUnit.SECONDS)) { - // A batch was successfully exported and removed from disk. You can call this method for as long as it keeps returning true. - } else { - // Either there was no data in the disk or the wrapped exporter returned CompletableResultCode.ofFailure(). - } -} catch (IOException e) { - // Something unexpected happened. +// Example of reading and exporting spans from disk +OtlpHttpSpanExporter networkExporter; +Iterator> spanCollections = spanStorage.iterator(); +while(spanCollections.hasNext()){ + networkExporter.export(spanCollections.next()); } ``` +The `File*Storage` iterators delete the previously returned collection when `next()` is called, +assuming that if the next collection is requested, it is because the previous one was successfully +consumed. + Both the writing and reading processes can run in parallel and they don't overlap because each is supposed to happen in different files. We ensure that reader and writer don't -accidentally meet in the same file by using the configurable parameters. These parameters set non-overlapping time frames for each action to be done on a single file at a time. On top of that, there's a mechanism in -place to avoid overlapping on edge cases where the time frames ended but the resources haven't been -released. For that mechanism to work properly, this tool assumes that both the reading and the -writing actions are executed within the same application process. +accidentally meet in the same file by using the configurable parameters. These parameters set +non-overlapping time frames for each action to be done on a single file at a time. On top of that, +there's a mechanism in place to avoid overlapping on edge cases where the time frames ended but the +resources haven't been released. 
For that mechanism to work properly, this tool assumes that both +the reading and the writing actions are executed within the same application process. ## Component owners diff --git a/disk-buffering/assets/reading-flow.png b/disk-buffering/assets/reading-flow.png index 76b8de438..63750e5a3 100644 Binary files a/disk-buffering/assets/reading-flow.png and b/disk-buffering/assets/reading-flow.png differ diff --git a/disk-buffering/assets/writing-flow.png b/disk-buffering/assets/writing-flow.png index c6144b301..b4b21359d 100644 Binary files a/disk-buffering/assets/writing-flow.png and b/disk-buffering/assets/writing-flow.png differ diff --git a/disk-buffering/build.gradle.kts b/disk-buffering/build.gradle.kts index edc8e862c..c36d73588 100644 --- a/disk-buffering/build.gradle.kts +++ b/disk-buffering/build.gradle.kts @@ -1,48 +1,29 @@ import com.github.jengelman.gradle.plugins.shadow.tasks.ShadowJar -import ru.vyarus.gradle.plugin.animalsniffer.AnimalSniffer plugins { id("otel.java-conventions") id("otel.publish-conventions") - id("com.github.johnrengelman.shadow") + id("otel.animalsniffer-conventions") + id("com.gradleup.shadow") id("me.champeau.jmh") version "0.7.3" - id("ru.vyarus.animalsniffer") version "2.0.0" - id("com.squareup.wire") version "5.3.1" + id("com.squareup.wire") version "5.4.0" } description = "Exporter implementations that store signals on disk" otelJava.moduleName.set("io.opentelemetry.contrib.exporters.disk") -java { - sourceCompatibility = JavaVersion.VERSION_1_8 - targetCompatibility = JavaVersion.VERSION_1_8 -} - val protos by configurations.creating dependencies { api("io.opentelemetry:opentelemetry-sdk") + implementation("io.opentelemetry:opentelemetry-api-incubator") + implementation("io.opentelemetry:opentelemetry-exporter-otlp-common") compileOnly("com.google.auto.value:auto-value-annotations") annotationProcessor("com.google.auto.value:auto-value") - signature("com.toasttab.android:gummy-bears-api-21:0.6.1:coreLib@signature") 
testImplementation("org.mockito:mockito-inline") testImplementation("io.opentelemetry:opentelemetry-sdk-testing") - protos("io.opentelemetry.proto:opentelemetry-proto:1.5.0-alpha@jar") -} - -animalsniffer { - sourceSets = listOf(java.sourceSets.main.get()) -} - -// Always having declared output makes this task properly participate in tasks up-to-date checks -tasks.withType { - reports.text.required.set(true) -} - -// Attaching animalsniffer check to the compilation process. -tasks.named("classes").configure { - finalizedBy("animalsnifferMain") + protos("io.opentelemetry.proto:opentelemetry-proto:1.8.0-alpha@jar") } jmh { @@ -67,15 +48,17 @@ wire { } root( - "opentelemetry.proto.trace.v1.TracesData", - "opentelemetry.proto.metrics.v1.MetricsData", - "opentelemetry.proto.logs.v1.LogsData", + // These are the types used by the Java SDK's OTLP exporters. + "opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest", + "opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest", + "opentelemetry.proto.collector.logs.v1.ExportLogsServiceRequest", ) } tasks.named("shadowJar") { archiveClassifier.set("") - configurations = emptyList() // To avoid embedding any dependencies as we only need to rename some local packages. + configurations = + emptyList() // To avoid embedding any dependencies as we only need to rename some local packages. 
relocate("io.opentelemetry.proto", "io.opentelemetry.diskbuffering.proto") mustRunAfter("jar") } diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/LogRecordFromDiskExporter.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/LogRecordFromDiskExporter.java deleted file mode 100644 index 7b37ee361..000000000 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/LogRecordFromDiskExporter.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.disk.buffering; - -import io.opentelemetry.contrib.disk.buffering.config.StorageConfiguration; -import io.opentelemetry.contrib.disk.buffering.internal.exporter.FromDiskExporter; -import io.opentelemetry.contrib.disk.buffering.internal.exporter.FromDiskExporterImpl; -import io.opentelemetry.contrib.disk.buffering.internal.serialization.deserializers.SignalDeserializer; -import io.opentelemetry.contrib.disk.buffering.internal.utils.SignalTypes; -import io.opentelemetry.sdk.logs.data.LogRecordData; -import io.opentelemetry.sdk.logs.export.LogRecordExporter; -import java.io.IOException; -import java.util.concurrent.TimeUnit; - -public class LogRecordFromDiskExporter implements FromDiskExporter { - - private final FromDiskExporterImpl delegate; - - public static LogRecordFromDiskExporter create( - LogRecordExporter exporter, StorageConfiguration config) throws IOException { - FromDiskExporterImpl delegate = - FromDiskExporterImpl.builder() - .setFolderName(SignalTypes.logs.name()) - .setStorageConfiguration(config) - .setDeserializer(SignalDeserializer.ofLogs()) - .setExportFunction(exporter::export) - .setDebugEnabled(config.isDebugEnabled()) - .build(); - return new LogRecordFromDiskExporter(delegate); - } - - private LogRecordFromDiskExporter(FromDiskExporterImpl delegate) { - this.delegate = delegate; - } - - @Override - public boolean 
exportStoredBatch(long timeout, TimeUnit unit) throws IOException { - return delegate.exportStoredBatch(timeout, unit); - } - - @Override - public void shutdown() throws IOException { - delegate.shutdown(); - } -} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/LogRecordToDiskExporter.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/LogRecordToDiskExporter.java deleted file mode 100644 index 7570aed8e..000000000 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/LogRecordToDiskExporter.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.disk.buffering; - -import io.opentelemetry.contrib.disk.buffering.config.StorageConfiguration; -import io.opentelemetry.contrib.disk.buffering.internal.exporter.ToDiskExporter; -import io.opentelemetry.contrib.disk.buffering.internal.serialization.serializers.SignalSerializer; -import io.opentelemetry.contrib.disk.buffering.internal.utils.SignalTypes; -import io.opentelemetry.sdk.common.CompletableResultCode; -import io.opentelemetry.sdk.logs.data.LogRecordData; -import io.opentelemetry.sdk.logs.export.LogRecordExporter; -import java.io.IOException; -import java.util.Collection; - -/** - * This class implements a {@link LogRecordExporter} that delegates to an instance of {@code - * ToDiskExporter}. - */ -public class LogRecordToDiskExporter implements LogRecordExporter { - private final ToDiskExporter delegate; - - /** - * Creates a new LogRecordToDiskExporter that will buffer LogRecordData telemetry on disk storage. - * - * @param delegate - The LogRecordExporter to delegate to if disk writing fails. - * @param config - The StorageConfiguration that specifies how storage is managed. - * @return A new LogRecordToDiskExporter instance. - * @throws IOException if the delegate ToDiskExporter could not be created. 
- */ - public static LogRecordToDiskExporter create( - LogRecordExporter delegate, StorageConfiguration config) throws IOException { - ToDiskExporter toDisk = - ToDiskExporter.builder() - .setFolderName(SignalTypes.logs.name()) - .setStorageConfiguration(config) - .setSerializer(SignalSerializer.ofLogs()) - .setExportFunction(delegate::export) - .build(); - return new LogRecordToDiskExporter(toDisk); - } - - // Visible for testing - LogRecordToDiskExporter(ToDiskExporter delegate) { - this.delegate = delegate; - } - - @Override - public CompletableResultCode export(Collection logs) { - return delegate.export(logs); - } - - @Override - public CompletableResultCode flush() { - return CompletableResultCode.ofSuccess(); - } - - @Override - public CompletableResultCode shutdown() { - try { - delegate.shutdown(); - return CompletableResultCode.ofSuccess(); - } catch (IOException e) { - return CompletableResultCode.ofFailure(); - } - } -} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/MetricFromDiskExporter.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/MetricFromDiskExporter.java deleted file mode 100644 index bf652f8f8..000000000 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/MetricFromDiskExporter.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.disk.buffering; - -import io.opentelemetry.contrib.disk.buffering.config.StorageConfiguration; -import io.opentelemetry.contrib.disk.buffering.internal.exporter.FromDiskExporter; -import io.opentelemetry.contrib.disk.buffering.internal.exporter.FromDiskExporterImpl; -import io.opentelemetry.contrib.disk.buffering.internal.serialization.deserializers.SignalDeserializer; -import io.opentelemetry.contrib.disk.buffering.internal.utils.SignalTypes; -import io.opentelemetry.sdk.metrics.data.MetricData; -import 
io.opentelemetry.sdk.metrics.export.MetricExporter; -import java.io.IOException; -import java.util.concurrent.TimeUnit; - -public class MetricFromDiskExporter implements FromDiskExporter { - - private final FromDiskExporterImpl delegate; - - public static MetricFromDiskExporter create(MetricExporter exporter, StorageConfiguration config) - throws IOException { - FromDiskExporterImpl delegate = - FromDiskExporterImpl.builder() - .setFolderName(SignalTypes.metrics.name()) - .setStorageConfiguration(config) - .setDeserializer(SignalDeserializer.ofMetrics()) - .setExportFunction(exporter::export) - .setDebugEnabled(config.isDebugEnabled()) - .build(); - return new MetricFromDiskExporter(delegate); - } - - private MetricFromDiskExporter(FromDiskExporterImpl delegate) { - this.delegate = delegate; - } - - @Override - public boolean exportStoredBatch(long timeout, TimeUnit unit) throws IOException { - return delegate.exportStoredBatch(timeout, unit); - } - - @Override - public void shutdown() throws IOException { - delegate.shutdown(); - } -} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/MetricToDiskExporter.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/MetricToDiskExporter.java deleted file mode 100644 index bf2e7066f..000000000 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/MetricToDiskExporter.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.disk.buffering; - -import io.opentelemetry.contrib.disk.buffering.config.StorageConfiguration; -import io.opentelemetry.contrib.disk.buffering.internal.exporter.ToDiskExporter; -import io.opentelemetry.contrib.disk.buffering.internal.serialization.serializers.SignalSerializer; -import io.opentelemetry.contrib.disk.buffering.internal.utils.SignalTypes; -import io.opentelemetry.sdk.common.CompletableResultCode; -import 
io.opentelemetry.sdk.metrics.InstrumentType; -import io.opentelemetry.sdk.metrics.data.AggregationTemporality; -import io.opentelemetry.sdk.metrics.data.MetricData; -import io.opentelemetry.sdk.metrics.export.AggregationTemporalitySelector; -import io.opentelemetry.sdk.metrics.export.MetricExporter; -import java.io.IOException; -import java.util.Collection; - -/** - * This class implements a {@link MetricExporter} that delegates to an instance of {@code - * ToDiskExporter}. - */ -public class MetricToDiskExporter implements MetricExporter { - - private final ToDiskExporter delegate; - private final AggregationTemporalitySelector aggregationTemporalitySelector; - - /** - * Creates a new MetricToDiskExporter that will buffer Metric telemetry on disk storage. - * - * @param delegate - The MetricExporter to delegate to if disk writing fails. - * @param config - The StorageConfiguration that specifies how storage is managed. - * @return A new MetricToDiskExporter instance. - * @throws IOException if the delegate ToDiskExporter could not be created. 
- */ - public static MetricToDiskExporter create(MetricExporter delegate, StorageConfiguration config) - throws IOException { - ToDiskExporter toDisk = - ToDiskExporter.builder() - .setFolderName(SignalTypes.metrics.name()) - .setStorageConfiguration(config) - .setSerializer(SignalSerializer.ofMetrics()) - .setExportFunction(delegate::export) - .build(); - return new MetricToDiskExporter(toDisk, delegate); - } - - // VisibleForTesting - MetricToDiskExporter( - ToDiskExporter delegate, AggregationTemporalitySelector selector) { - this.delegate = delegate; - this.aggregationTemporalitySelector = selector; - } - - @Override - public CompletableResultCode export(Collection metrics) { - return delegate.export(metrics); - } - - @Override - public CompletableResultCode flush() { - return CompletableResultCode.ofSuccess(); - } - - @Override - public CompletableResultCode shutdown() { - try { - delegate.shutdown(); - } catch (IOException e) { - return CompletableResultCode.ofFailure(); - } - return CompletableResultCode.ofSuccess(); - } - - @Override - public AggregationTemporality getAggregationTemporality(InstrumentType instrumentType) { - return aggregationTemporalitySelector.getAggregationTemporality(instrumentType); - } -} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/SpanFromDiskExporter.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/SpanFromDiskExporter.java deleted file mode 100644 index c23ac043e..000000000 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/SpanFromDiskExporter.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.disk.buffering; - -import io.opentelemetry.contrib.disk.buffering.config.StorageConfiguration; -import io.opentelemetry.contrib.disk.buffering.internal.exporter.FromDiskExporter; -import 
io.opentelemetry.contrib.disk.buffering.internal.exporter.FromDiskExporterImpl; -import io.opentelemetry.contrib.disk.buffering.internal.serialization.deserializers.SignalDeserializer; -import io.opentelemetry.contrib.disk.buffering.internal.utils.SignalTypes; -import io.opentelemetry.sdk.trace.data.SpanData; -import io.opentelemetry.sdk.trace.export.SpanExporter; -import java.io.IOException; -import java.util.concurrent.TimeUnit; - -public class SpanFromDiskExporter implements FromDiskExporter { - - private final FromDiskExporterImpl delegate; - - public static SpanFromDiskExporter create(SpanExporter exporter, StorageConfiguration config) - throws IOException { - FromDiskExporterImpl delegate = - FromDiskExporterImpl.builder() - .setFolderName(SignalTypes.spans.name()) - .setStorageConfiguration(config) - .setDeserializer(SignalDeserializer.ofSpans()) - .setExportFunction(exporter::export) - .setDebugEnabled(config.isDebugEnabled()) - .build(); - return new SpanFromDiskExporter(delegate); - } - - private SpanFromDiskExporter(FromDiskExporterImpl delegate) { - this.delegate = delegate; - } - - @Override - public boolean exportStoredBatch(long timeout, TimeUnit unit) throws IOException { - return delegate.exportStoredBatch(timeout, unit); - } - - @Override - public void shutdown() throws IOException { - delegate.shutdown(); - } -} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/SpanToDiskExporter.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/SpanToDiskExporter.java deleted file mode 100644 index d64a4cd71..000000000 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/SpanToDiskExporter.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.disk.buffering; - -import io.opentelemetry.contrib.disk.buffering.config.StorageConfiguration; -import 
io.opentelemetry.contrib.disk.buffering.internal.exporter.ToDiskExporter; -import io.opentelemetry.contrib.disk.buffering.internal.serialization.serializers.SignalSerializer; -import io.opentelemetry.contrib.disk.buffering.internal.utils.SignalTypes; -import io.opentelemetry.sdk.common.CompletableResultCode; -import io.opentelemetry.sdk.trace.data.SpanData; -import io.opentelemetry.sdk.trace.export.SpanExporter; -import java.io.IOException; -import java.util.Collection; - -/** - * This class implements a SpanExporter that delegates to an instance of {@code - * ToDiskExporter}. - */ -public class SpanToDiskExporter implements SpanExporter { - - private final ToDiskExporter delegate; - - /** - * Creates a new SpanToDiskExporter that will buffer Span telemetry on disk storage. - * - * @param delegate - The SpanExporter to delegate to if disk writing fails. - * @param config - The StorageConfiguration that specifies how storage is managed. - * @return A new SpanToDiskExporter instance. - * @throws IOException if the delegate ToDiskExporter could not be created. 
- */ - public static SpanToDiskExporter create(SpanExporter delegate, StorageConfiguration config) - throws IOException { - ToDiskExporter toDisk = - ToDiskExporter.builder() - .setFolderName(SignalTypes.spans.name()) - .setStorageConfiguration(config) - .setSerializer(SignalSerializer.ofSpans()) - .setExportFunction(delegate::export) - .build(); - return new SpanToDiskExporter(toDisk); - } - - // Visible for testing - SpanToDiskExporter(ToDiskExporter delegate) { - this.delegate = delegate; - } - - @Override - public CompletableResultCode export(Collection spans) { - return delegate.export(spans); - } - - @Override - public CompletableResultCode flush() { - return CompletableResultCode.ofSuccess(); - } - - @Override - public CompletableResultCode shutdown() { - try { - delegate.shutdown(); - } catch (IOException e) { - return CompletableResultCode.ofFailure(); - } - return CompletableResultCode.ofSuccess(); - } -} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/config/TemporaryFileProvider.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/config/TemporaryFileProvider.java deleted file mode 100644 index 3cf803f9f..000000000 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/config/TemporaryFileProvider.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.disk.buffering.config; - -import java.io.File; -import java.io.IOException; - -/** Provides a temporary file needed to do the disk reading process. */ -public interface TemporaryFileProvider { - - /** - * Creates a temporary file. - * - * @param prefix The prefix for the provided file name. 
- */ - File createTemporaryFile(String prefix) throws IOException; -} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/exporters/LogRecordToDiskExporter.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/exporters/LogRecordToDiskExporter.java new file mode 100644 index 000000000..6ed7ae2b4 --- /dev/null +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/exporters/LogRecordToDiskExporter.java @@ -0,0 +1,81 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.disk.buffering.exporters; + +import com.google.errorprone.annotations.CanIgnoreReturnValue; +import io.opentelemetry.contrib.disk.buffering.exporters.callback.ExporterCallback; +import io.opentelemetry.contrib.disk.buffering.exporters.callback.NoopExporterCallback; +import io.opentelemetry.contrib.disk.buffering.internal.exporters.SignalStorageExporter; +import io.opentelemetry.contrib.disk.buffering.storage.SignalStorage; +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.logs.data.LogRecordData; +import io.opentelemetry.sdk.logs.export.LogRecordExporter; +import java.time.Duration; +import java.util.Collection; + +/** Exporter that stores logs into disk. 
*/ +public final class LogRecordToDiskExporter implements LogRecordExporter { + private final SignalStorageExporter storageExporter; + private final ExporterCallback callback; + private static final ExporterCallback DEFAULT_CALLBACK = + new NoopExporterCallback<>(); + private static final Duration DEFAULT_EXPORT_TIMEOUT = Duration.ofSeconds(10); + + private LogRecordToDiskExporter( + SignalStorageExporter storageExporter, + ExporterCallback callback) { + this.storageExporter = storageExporter; + this.callback = callback; + } + + public static Builder builder(SignalStorage.LogRecord storage) { + return new Builder(storage); + } + + @Override + public CompletableResultCode export(Collection logs) { + return storageExporter.exportToStorage(logs); + } + + @Override + public CompletableResultCode flush() { + return CompletableResultCode.ofSuccess(); + } + + @Override + public CompletableResultCode shutdown() { + callback.onShutdown(); + return CompletableResultCode.ofSuccess(); + } + + public static final class Builder { + private final SignalStorage.LogRecord storage; + private ExporterCallback callback = DEFAULT_CALLBACK; + private Duration writeTimeout = DEFAULT_EXPORT_TIMEOUT; + + @CanIgnoreReturnValue + public Builder setExporterCallback(ExporterCallback value) { + callback = value; + return this; + } + + @CanIgnoreReturnValue + public Builder setWriteTimeout(Duration value) { + writeTimeout = value; + return this; + } + + public LogRecordToDiskExporter build() { + SignalStorageExporter storageExporter = + new SignalStorageExporter<>(storage, callback, writeTimeout); + return new LogRecordToDiskExporter(storageExporter, callback); + } + + private Builder(SignalStorage.LogRecord storage) { + this.storage = storage; + } + } +} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/exporters/MetricToDiskExporter.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/exporters/MetricToDiskExporter.java new file mode 
100644 index 000000000..fe7a86abf --- /dev/null +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/exporters/MetricToDiskExporter.java @@ -0,0 +1,99 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.disk.buffering.exporters; + +import com.google.errorprone.annotations.CanIgnoreReturnValue; +import io.opentelemetry.contrib.disk.buffering.exporters.callback.ExporterCallback; +import io.opentelemetry.contrib.disk.buffering.exporters.callback.NoopExporterCallback; +import io.opentelemetry.contrib.disk.buffering.internal.exporters.SignalStorageExporter; +import io.opentelemetry.contrib.disk.buffering.storage.SignalStorage; +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.metrics.InstrumentType; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.export.AggregationTemporalitySelector; +import io.opentelemetry.sdk.metrics.export.MetricExporter; +import java.time.Duration; +import java.util.Collection; + +/** Exporter that stores metrics into disk. 
*/ +public final class MetricToDiskExporter implements MetricExporter { + private final SignalStorageExporter storageExporter; + private final AggregationTemporalitySelector aggregationTemporalitySelector; + private final ExporterCallback callback; + private static final ExporterCallback DEFAULT_CALLBACK = new NoopExporterCallback<>(); + private static final Duration DEFAULT_EXPORT_TIMEOUT = Duration.ofSeconds(10); + + private MetricToDiskExporter( + SignalStorageExporter storageExporter, + AggregationTemporalitySelector aggregationTemporalitySelector, + ExporterCallback callback) { + this.storageExporter = storageExporter; + this.aggregationTemporalitySelector = aggregationTemporalitySelector; + this.callback = callback; + } + + public static Builder builder(SignalStorage.Metric storage) { + return new Builder(storage); + } + + @Override + public CompletableResultCode export(Collection metrics) { + return storageExporter.exportToStorage(metrics); + } + + @Override + public CompletableResultCode flush() { + return CompletableResultCode.ofSuccess(); + } + + @Override + public CompletableResultCode shutdown() { + callback.onShutdown(); + return CompletableResultCode.ofSuccess(); + } + + @Override + public AggregationTemporality getAggregationTemporality(InstrumentType instrumentType) { + return aggregationTemporalitySelector.getAggregationTemporality(instrumentType); + } + + public static final class Builder { + private final SignalStorage.Metric storage; + private AggregationTemporalitySelector aggregationTemporalitySelector = + AggregationTemporalitySelector.alwaysCumulative(); + private ExporterCallback callback = DEFAULT_CALLBACK; + private Duration writeTimeout = DEFAULT_EXPORT_TIMEOUT; + + @CanIgnoreReturnValue + public Builder setExporterCallback(ExporterCallback value) { + callback = value; + return this; + } + + @CanIgnoreReturnValue + public Builder setWriteTimeout(Duration value) { + writeTimeout = value; + return this; + } + + @CanIgnoreReturnValue + 
public Builder setAggregationTemporalitySelector(AggregationTemporalitySelector value) { + aggregationTemporalitySelector = value; + return this; + } + + public MetricToDiskExporter build() { + SignalStorageExporter storageExporter = + new SignalStorageExporter<>(storage, callback, writeTimeout); + return new MetricToDiskExporter(storageExporter, aggregationTemporalitySelector, callback); + } + + private Builder(SignalStorage.Metric storage) { + this.storage = storage; + } + } +} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/exporters/SpanToDiskExporter.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/exporters/SpanToDiskExporter.java new file mode 100644 index 000000000..9558a2767 --- /dev/null +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/exporters/SpanToDiskExporter.java @@ -0,0 +1,79 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.disk.buffering.exporters; + +import com.google.errorprone.annotations.CanIgnoreReturnValue; +import io.opentelemetry.contrib.disk.buffering.exporters.callback.ExporterCallback; +import io.opentelemetry.contrib.disk.buffering.exporters.callback.NoopExporterCallback; +import io.opentelemetry.contrib.disk.buffering.internal.exporters.SignalStorageExporter; +import io.opentelemetry.contrib.disk.buffering.storage.SignalStorage; +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.trace.data.SpanData; +import io.opentelemetry.sdk.trace.export.SpanExporter; +import java.time.Duration; +import java.util.Collection; + +/** Exporter that stores spans into disk. 
*/ +public final class SpanToDiskExporter implements SpanExporter { + private final SignalStorageExporter storageExporter; + private final ExporterCallback callback; + private static final ExporterCallback DEFAULT_CALLBACK = new NoopExporterCallback<>(); + private static final Duration DEFAULT_EXPORT_TIMEOUT = Duration.ofSeconds(10); + + private SpanToDiskExporter( + SignalStorageExporter storageExporter, ExporterCallback callback) { + this.storageExporter = storageExporter; + this.callback = callback; + } + + public static Builder builder(SignalStorage.Span storage) { + return new Builder(storage); + } + + @Override + public CompletableResultCode export(Collection spans) { + return storageExporter.exportToStorage(spans); + } + + @Override + public CompletableResultCode flush() { + return CompletableResultCode.ofSuccess(); + } + + @Override + public CompletableResultCode shutdown() { + callback.onShutdown(); + return CompletableResultCode.ofSuccess(); + } + + public static final class Builder { + private final SignalStorage.Span storage; + private ExporterCallback callback = DEFAULT_CALLBACK; + private Duration writeTimeout = DEFAULT_EXPORT_TIMEOUT; + + private Builder(SignalStorage.Span storage) { + this.storage = storage; + } + + @CanIgnoreReturnValue + public Builder setExporterCallback(ExporterCallback value) { + callback = value; + return this; + } + + @CanIgnoreReturnValue + public Builder setWriteTimeout(Duration value) { + writeTimeout = value; + return this; + } + + public SpanToDiskExporter build() { + SignalStorageExporter storageExporter = + new SignalStorageExporter<>(storage, callback, writeTimeout); + return new SpanToDiskExporter(storageExporter, callback); + } + } +} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/exporters/callback/ExporterCallback.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/exporters/callback/ExporterCallback.java new file mode 100644 index 000000000..9c3c816ea --- 
/dev/null +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/exporters/callback/ExporterCallback.java @@ -0,0 +1,30 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.disk.buffering.exporters.callback; + +import java.util.Collection; +import javax.annotation.Nullable; + +/** Notifies about exporter and storage-related operations from within a signal to disk exporter. */ +public interface ExporterCallback { + /** + * Called when an export to disk operation succeeded. + * + * @param items The items successfully stored in disk. + */ + void onExportSuccess(Collection items); + + /** + * Called when an export to disk operation failed. + * + * @param items The items that couldn't get stored in disk. + * @param error Optional - provides more information of why the operation failed. + */ + void onExportError(Collection items, @Nullable Throwable error); + + /** Called when the exporter is closed. 
*/ + void onShutdown(); +} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/exporters/callback/NoopExporterCallback.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/exporters/callback/NoopExporterCallback.java new file mode 100644 index 000000000..6313d1a5b --- /dev/null +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/exporters/callback/NoopExporterCallback.java @@ -0,0 +1,21 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.disk.buffering.exporters.callback; + +import java.util.Collection; +import javax.annotation.Nullable; + +public final class NoopExporterCallback implements ExporterCallback { + + @Override + public void onExportSuccess(Collection items) {} + + @Override + public void onExportError(Collection items, @Nullable Throwable error) {} + + @Override + public void onShutdown() {} +} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/exporters/package-info.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/exporters/package-info.java new file mode 100644 index 000000000..6a367dca9 --- /dev/null +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/exporters/package-info.java @@ -0,0 +1,9 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +@ParametersAreNonnullByDefault +package io.opentelemetry.contrib.disk.buffering.exporters; + +import javax.annotation.ParametersAreNonnullByDefault; diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/exporter/FromDiskExporter.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/exporter/FromDiskExporter.java deleted file mode 100644 index fdc3bb796..000000000 --- 
a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/exporter/FromDiskExporter.java +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.disk.buffering.internal.exporter; - -import java.io.IOException; -import java.util.concurrent.TimeUnit; - -public interface FromDiskExporter { - boolean exportStoredBatch(long timeout, TimeUnit unit) throws IOException; - - void shutdown() throws IOException; -} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/exporter/FromDiskExporterBuilder.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/exporter/FromDiskExporterBuilder.java deleted file mode 100644 index eec298469..000000000 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/exporter/FromDiskExporterBuilder.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.disk.buffering.internal.exporter; - -import static java.util.Collections.emptyList; - -import com.google.errorprone.annotations.CanIgnoreReturnValue; -import io.opentelemetry.contrib.disk.buffering.config.StorageConfiguration; -import io.opentelemetry.contrib.disk.buffering.internal.serialization.deserializers.SignalDeserializer; -import io.opentelemetry.contrib.disk.buffering.internal.storage.Storage; -import io.opentelemetry.contrib.disk.buffering.internal.storage.StorageBuilder; -import io.opentelemetry.sdk.common.Clock; -import io.opentelemetry.sdk.common.CompletableResultCode; -import java.io.IOException; -import java.util.Collection; -import java.util.function.Function; -import org.jetbrains.annotations.NotNull; - -public class FromDiskExporterBuilder { - - private SignalDeserializer serializer = noopDeserializer(); - private Function, CompletableResultCode> exportFunction 
= - x -> CompletableResultCode.ofFailure(); - - private boolean debugEnabled = false; - - @NotNull - private static SignalDeserializer noopDeserializer() { - return x -> emptyList(); - } - - private final StorageBuilder storageBuilder = Storage.builder(); - - @CanIgnoreReturnValue - public FromDiskExporterBuilder setFolderName(String folderName) { - storageBuilder.setFolderName(folderName); - return this; - } - - @CanIgnoreReturnValue - public FromDiskExporterBuilder setStorageConfiguration(StorageConfiguration configuration) { - storageBuilder.setStorageConfiguration(configuration); - return this; - } - - @CanIgnoreReturnValue - public FromDiskExporterBuilder setStorageClock(Clock clock) { - storageBuilder.setStorageClock(clock); - return this; - } - - @CanIgnoreReturnValue - public FromDiskExporterBuilder setDeserializer(SignalDeserializer serializer) { - this.serializer = serializer; - return this; - } - - @CanIgnoreReturnValue - public FromDiskExporterBuilder setExportFunction( - Function, CompletableResultCode> exportFunction) { - this.exportFunction = exportFunction; - return this; - } - - @CanIgnoreReturnValue - public FromDiskExporterBuilder enableDebug() { - return setDebugEnabled(true); - } - - @CanIgnoreReturnValue - public FromDiskExporterBuilder setDebugEnabled(boolean debugEnabled) { - this.debugEnabled = debugEnabled; - return this; - } - - public FromDiskExporterImpl build() throws IOException { - Storage storage = storageBuilder.build(); - return new FromDiskExporterImpl<>(serializer, exportFunction, storage, debugEnabled); - } -} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/exporter/FromDiskExporterImpl.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/exporter/FromDiskExporterImpl.java deleted file mode 100644 index 19ef6fe2c..000000000 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/exporter/FromDiskExporterImpl.java +++ /dev/null 
@@ -1,86 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.disk.buffering.internal.exporter; - -import io.opentelemetry.contrib.disk.buffering.internal.serialization.deserializers.DeserializationException; -import io.opentelemetry.contrib.disk.buffering.internal.serialization.deserializers.SignalDeserializer; -import io.opentelemetry.contrib.disk.buffering.internal.storage.Storage; -import io.opentelemetry.contrib.disk.buffering.internal.storage.files.reader.ProcessResult; -import io.opentelemetry.contrib.disk.buffering.internal.storage.responses.ReadableResult; -import io.opentelemetry.contrib.disk.buffering.internal.utils.DebugLogger; -import io.opentelemetry.sdk.common.CompletableResultCode; -import java.io.IOException; -import java.util.Collection; -import java.util.List; -import java.util.concurrent.TimeUnit; -import java.util.function.Function; -import java.util.logging.Logger; - -/** - * Signal-type generic class that can read telemetry previously buffered on disk and send it to - * another delegated exporter. - */ -public final class FromDiskExporterImpl implements FromDiskExporter { - private final DebugLogger logger; - private final Storage storage; - private final SignalDeserializer deserializer; - private final Function, CompletableResultCode> exportFunction; - - FromDiskExporterImpl( - SignalDeserializer deserializer, - Function, CompletableResultCode> exportFunction, - Storage storage, - boolean debugEnabled) { - this.deserializer = deserializer; - this.exportFunction = exportFunction; - this.storage = storage; - this.logger = - DebugLogger.wrap(Logger.getLogger(FromDiskExporterImpl.class.getName()), debugEnabled); - } - - public static FromDiskExporterBuilder builder() { - return new FromDiskExporterBuilder<>(); - } - - /** - * Reads data from the disk and attempts to export it. - * - * @param timeout The amount of time to wait for the wrapped exporter to finish. 
- * @param unit The unit of the time provided. - * @return true if there was data available, and it was successfully exported within the timeout - * provided. false otherwise. - * @throws IOException If an unexpected error happens. - */ - @Override - public boolean exportStoredBatch(long timeout, TimeUnit unit) throws IOException { - logger.log("Attempting to export " + deserializer.signalType() + " batch from disk."); - ReadableResult result = - storage.readAndProcess( - bytes -> { - logger.log( - "Read " - + bytes.length - + " " - + deserializer.signalType() - + " bytes from storage."); - try { - List telemetry = deserializer.deserialize(bytes); - logger.log( - "Now exporting batch of " + telemetry.size() + " " + deserializer.signalType()); - CompletableResultCode join = exportFunction.apply(telemetry).join(timeout, unit); - return join.isSuccess() ? ProcessResult.SUCCEEDED : ProcessResult.TRY_LATER; - } catch (DeserializationException e) { - return ProcessResult.CONTENT_INVALID; - } - }); - return result == ReadableResult.SUCCEEDED; - } - - @Override - public void shutdown() throws IOException { - storage.close(); - } -} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/exporter/ToDiskExporter.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/exporter/ToDiskExporter.java deleted file mode 100644 index 1a43cb5eb..000000000 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/exporter/ToDiskExporter.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.disk.buffering.internal.exporter; - -import io.opentelemetry.contrib.disk.buffering.internal.serialization.serializers.SignalSerializer; -import io.opentelemetry.contrib.disk.buffering.internal.storage.Storage; -import io.opentelemetry.contrib.disk.buffering.internal.utils.DebugLogger; -import 
io.opentelemetry.sdk.common.CompletableResultCode; -import java.io.IOException; -import java.util.Collection; -import java.util.function.Function; -import java.util.logging.Level; -import java.util.logging.Logger; - -public class ToDiskExporter { - - private final DebugLogger logger; - private final Storage storage; - private final SignalSerializer serializer; - private final Function, CompletableResultCode> exportFunction; - - ToDiskExporter( - SignalSerializer serializer, - Function, CompletableResultCode> exportFunction, - Storage storage, - boolean debugEnabled) { - this.serializer = serializer; - this.exportFunction = exportFunction; - this.storage = storage; - this.logger = DebugLogger.wrap(Logger.getLogger(ToDiskExporter.class.getName()), debugEnabled); - } - - public static ToDiskExporterBuilder builder() { - return new ToDiskExporterBuilder<>(); - } - - public CompletableResultCode export(Collection data) { - logger.log("Intercepting exporter batch.", Level.FINER); - try { - if (storage.write(serializer.serialize(data))) { - return CompletableResultCode.ofSuccess(); - } - logger.log("Could not store batch in disk. Exporting it right away."); - return exportFunction.apply(data); - } catch (IOException e) { - logger.log( - "An unexpected error happened while attempting to write the data in disk. 
Exporting it right away.", - Level.WARNING, - e); - return exportFunction.apply(data); - } - } - - public void shutdown() throws IOException { - storage.close(); - } -} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/exporter/ToDiskExporterBuilder.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/exporter/ToDiskExporterBuilder.java deleted file mode 100644 index 3ac7d2503..000000000 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/exporter/ToDiskExporterBuilder.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.disk.buffering.internal.exporter; - -import com.google.errorprone.annotations.CanIgnoreReturnValue; -import io.opentelemetry.contrib.disk.buffering.config.StorageConfiguration; -import io.opentelemetry.contrib.disk.buffering.internal.serialization.serializers.SignalSerializer; -import io.opentelemetry.contrib.disk.buffering.internal.storage.Storage; -import io.opentelemetry.contrib.disk.buffering.internal.storage.StorageBuilder; -import io.opentelemetry.sdk.common.Clock; -import io.opentelemetry.sdk.common.CompletableResultCode; -import java.io.IOException; -import java.util.Collection; -import java.util.function.Function; - -public final class ToDiskExporterBuilder { - - private SignalSerializer serializer = ts -> new byte[0]; - - private final StorageBuilder storageBuilder = Storage.builder(); - - private Function, CompletableResultCode> exportFunction = - x -> CompletableResultCode.ofFailure(); - private boolean debugEnabled = false; - - ToDiskExporterBuilder() {} - - @CanIgnoreReturnValue - public ToDiskExporterBuilder enableDebug() { - return setDebugEnabled(true); - } - - @CanIgnoreReturnValue - public ToDiskExporterBuilder setDebugEnabled(boolean debugEnabled) { - this.debugEnabled = debugEnabled; - return this; - } - - 
@CanIgnoreReturnValue - public ToDiskExporterBuilder setFolderName(String folderName) { - storageBuilder.setFolderName(folderName); - return this; - } - - @CanIgnoreReturnValue - public ToDiskExporterBuilder setStorageConfiguration(StorageConfiguration configuration) { - validateConfiguration(configuration); - storageBuilder.setStorageConfiguration(configuration); - return this; - } - - @CanIgnoreReturnValue - public ToDiskExporterBuilder setStorageClock(Clock clock) { - storageBuilder.setStorageClock(clock); - return this; - } - - @CanIgnoreReturnValue - public ToDiskExporterBuilder setSerializer(SignalSerializer serializer) { - this.serializer = serializer; - return this; - } - - @CanIgnoreReturnValue - public ToDiskExporterBuilder setExportFunction( - Function, CompletableResultCode> exportFunction) { - this.exportFunction = exportFunction; - return this; - } - - public ToDiskExporter build() throws IOException { - Storage storage = storageBuilder.build(); - return new ToDiskExporter<>(serializer, exportFunction, storage, debugEnabled); - } - - private static void validateConfiguration(StorageConfiguration configuration) { - if (configuration.getMinFileAgeForReadMillis() <= configuration.getMaxFileAgeForWriteMillis()) { - throw new IllegalArgumentException( - "The configured max file age for writing must be lower than the configured min file age for reading"); - } - } -} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/exporters/SignalStorageExporter.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/exporters/SignalStorageExporter.java new file mode 100644 index 000000000..22ba6b61c --- /dev/null +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/exporters/SignalStorageExporter.java @@ -0,0 +1,53 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package 
io.opentelemetry.contrib.disk.buffering.internal.exporters; + +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +import io.opentelemetry.contrib.disk.buffering.exporters.callback.ExporterCallback; +import io.opentelemetry.contrib.disk.buffering.storage.SignalStorage; +import io.opentelemetry.contrib.disk.buffering.storage.result.WriteResult; +import io.opentelemetry.sdk.common.CompletableResultCode; +import java.time.Duration; +import java.util.Collection; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; + +/** Internal utility for common export to disk operations across all exporters. */ +public final class SignalStorageExporter { + private final SignalStorage storage; + private final ExporterCallback callback; + private final Duration writeTimeout; + + public SignalStorageExporter( + SignalStorage storage, ExporterCallback callback, Duration writeTimeout) { + this.storage = storage; + this.callback = callback; + this.writeTimeout = writeTimeout; + } + + public CompletableResultCode exportToStorage(Collection items) { + CompletableFuture future = storage.write(items); + try { + WriteResult operation = future.get(writeTimeout.toMillis(), MILLISECONDS); + if (operation.isSuccessful()) { + callback.onExportSuccess(items); + return CompletableResultCode.ofSuccess(); + } + + Throwable error = operation.getError(); + callback.onExportError(items, error); + if (error != null) { + return CompletableResultCode.ofExceptionalFailure(error); + } + return CompletableResultCode.ofFailure(); + } catch (ExecutionException | InterruptedException | TimeoutException e) { + callback.onExportError(items, e); + return CompletableResultCode.ofExceptionalFailure(e); + } + } +} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/files/DefaultTemporaryFileProvider.java 
b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/files/DefaultTemporaryFileProvider.java deleted file mode 100644 index 9a9dfb8e6..000000000 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/files/DefaultTemporaryFileProvider.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.disk.buffering.internal.files; - -import io.opentelemetry.contrib.disk.buffering.config.TemporaryFileProvider; -import java.io.File; -import java.io.IOException; - -public final class DefaultTemporaryFileProvider implements TemporaryFileProvider { - private static final TemporaryFileProvider INSTANCE = new DefaultTemporaryFileProvider(); - - public static TemporaryFileProvider getInstance() { - return INSTANCE; - } - - private DefaultTemporaryFileProvider() {} - - @Override - public File createTemporaryFile(String prefix) throws IOException { - return File.createTempFile(prefix + "_", ".tmp"); - } -} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/deserializers/LogRecordDataDeserializer.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/deserializers/LogRecordDataDeserializer.java index 5ac0007d9..d4c877923 100644 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/deserializers/LogRecordDataDeserializer.java +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/deserializers/LogRecordDataDeserializer.java @@ -6,8 +6,7 @@ package io.opentelemetry.contrib.disk.buffering.internal.serialization.deserializers; import io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.logs.ProtoLogsDataMapper; -import io.opentelemetry.contrib.disk.buffering.internal.utils.SignalTypes; -import io.opentelemetry.proto.logs.v1.LogsData; 
+import io.opentelemetry.proto.collector.logs.v1.ExportLogsServiceRequest; import io.opentelemetry.sdk.logs.data.LogRecordData; import java.io.IOException; import java.util.List; @@ -24,14 +23,10 @@ static LogRecordDataDeserializer getInstance() { @Override public List deserialize(byte[] source) throws DeserializationException { try { - return ProtoLogsDataMapper.getInstance().fromProto(LogsData.ADAPTER.decode(source)); - } catch (IOException e) { + return ProtoLogsDataMapper.getInstance() + .fromProto(ExportLogsServiceRequest.ADAPTER.decode(source)); + } catch (IOException | IllegalStateException e) { throw new DeserializationException(e); } } - - @Override - public String signalType() { - return SignalTypes.logs.name(); - } } diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/deserializers/MetricDataDeserializer.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/deserializers/MetricDataDeserializer.java index 34e88b3ef..eda886f89 100644 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/deserializers/MetricDataDeserializer.java +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/deserializers/MetricDataDeserializer.java @@ -6,8 +6,7 @@ package io.opentelemetry.contrib.disk.buffering.internal.serialization.deserializers; import io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.metrics.ProtoMetricsDataMapper; -import io.opentelemetry.contrib.disk.buffering.internal.utils.SignalTypes; -import io.opentelemetry.proto.metrics.v1.MetricsData; +import io.opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest; import io.opentelemetry.sdk.metrics.data.MetricData; import java.io.IOException; import java.util.List; @@ -24,14 +23,10 @@ static MetricDataDeserializer getInstance() { @Override public List deserialize(byte[] source) throws 
DeserializationException { try { - return ProtoMetricsDataMapper.getInstance().fromProto(MetricsData.ADAPTER.decode(source)); - } catch (IOException e) { + return ProtoMetricsDataMapper.getInstance() + .fromProto(ExportMetricsServiceRequest.ADAPTER.decode(source)); + } catch (IOException | IllegalStateException e) { throw new DeserializationException(e); } } - - @Override - public String signalType() { - return SignalTypes.metrics.name(); - } } diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/deserializers/SignalDeserializer.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/deserializers/SignalDeserializer.java index dd56e356e..915868288 100644 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/deserializers/SignalDeserializer.java +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/deserializers/SignalDeserializer.java @@ -26,9 +26,4 @@ static SignalDeserializer ofLogs() { /** Deserializes the given byte array into a list of telemetry items. */ List deserialize(byte[] source) throws DeserializationException; - - /** Returns the name of the stored type of signal -- one of "metrics", "spans", or "logs". 
*/ - default String signalType() { - return "unknown"; - } } diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/deserializers/SpanDataDeserializer.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/deserializers/SpanDataDeserializer.java index 457d5f268..b3631ca4c 100644 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/deserializers/SpanDataDeserializer.java +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/deserializers/SpanDataDeserializer.java @@ -6,8 +6,7 @@ package io.opentelemetry.contrib.disk.buffering.internal.serialization.deserializers; import io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.spans.ProtoSpansDataMapper; -import io.opentelemetry.contrib.disk.buffering.internal.utils.SignalTypes; -import io.opentelemetry.proto.trace.v1.TracesData; +import io.opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest; import io.opentelemetry.sdk.trace.data.SpanData; import java.io.IOException; import java.util.List; @@ -24,14 +23,10 @@ static SpanDataDeserializer getInstance() { @Override public List deserialize(byte[] source) throws DeserializationException { try { - return ProtoSpansDataMapper.getInstance().fromProto(TracesData.ADAPTER.decode(source)); - } catch (IOException e) { + return ProtoSpansDataMapper.getInstance() + .fromProto(ExportTraceServiceRequest.ADAPTER.decode(source)); + } catch (IOException | IllegalStateException e) { throw new DeserializationException(e); } } - - @Override - public String signalType() { - return SignalTypes.spans.name(); - } } diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/common/AttributesMapper.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/common/AttributesMapper.java 
index e017cb878..631751349 100644 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/common/AttributesMapper.java +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/common/AttributesMapper.java @@ -85,7 +85,13 @@ private static void addValue(AttributesBuilder builder, String key, AnyValue val } else if (value.array_value != null) { addArray(builder, key, value.array_value); } else { - throw new UnsupportedOperationException(); + // Until we have complex attribute types that could potentially yield + // empty objects, we MUST assume here that the writer put an empty string + // into the value of the attribute. This will need to change later, when complex + // types arrive and the spec issue is resolved. + // + // See spec issue: https://github.com/open-telemetry/opentelemetry-specification/issues/4660 + builder.put(AttributeKey.stringKey(key), ""); } } diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/common/ByteStringMapper.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/common/ByteStringMapper.java index ca8366e8a..1234d25de 100644 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/common/ByteStringMapper.java +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/common/ByteStringMapper.java @@ -16,10 +16,10 @@ public static ByteStringMapper getInstance() { } public ByteString stringToProto(String source) { - return ByteString.encodeUtf8(source); + return ByteString.decodeHex(source); } public String protoToString(ByteString source) { - return source.utf8(); + return source.hex(); } } diff --git 
a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/logs/LogRecordDataMapper.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/logs/LogRecordDataMapper.java index 06ff85847..d9cafa4a4 100644 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/logs/LogRecordDataMapper.java +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/logs/LogRecordDataMapper.java @@ -64,6 +64,9 @@ private static void addExtrasToProtoBuilder(LogRecordData source, LogRecord.Buil target.trace_id(ByteStringMapper.getInstance().stringToProto(spanContext.getTraceId())); target.dropped_attributes_count( source.getTotalAttributeCount() - source.getAttributes().size()); + if (source.getEventName() != null) { + target.event_name(source.getEventName()); + } } public LogRecordData mapToSdk( @@ -99,6 +102,7 @@ private static void addExtrasToSdkItemBuilder( target.setTotalAttributeCount(source.dropped_attributes_count + attributes.size()); target.setResource(resource); target.setInstrumentationScopeInfo(scopeInfo); + target.setEventName(source.event_name); } private static AnyValue bodyToAnyValue(Value body) { diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/logs/ProtoLogsDataMapper.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/logs/ProtoLogsDataMapper.java index 1d11c177f..021935f9a 100644 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/logs/ProtoLogsDataMapper.java +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/logs/ProtoLogsDataMapper.java @@ -6,8 +6,8 @@ package io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.logs; import 
io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.common.BaseProtoSignalsDataMapper; +import io.opentelemetry.proto.collector.logs.v1.ExportLogsServiceRequest; import io.opentelemetry.proto.logs.v1.LogRecord; -import io.opentelemetry.proto.logs.v1.LogsData; import io.opentelemetry.proto.logs.v1.ResourceLogs; import io.opentelemetry.proto.logs.v1.ScopeLogs; import io.opentelemetry.sdk.common.InstrumentationScopeInfo; @@ -19,7 +19,7 @@ public final class ProtoLogsDataMapper extends BaseProtoSignalsDataMapper< - LogRecordData, LogRecord, LogsData, ResourceLogs, ScopeLogs> { + LogRecordData, LogRecord, ExportLogsServiceRequest, ResourceLogs, ScopeLogs> { private static final ProtoLogsDataMapper INSTANCE = new ProtoLogsDataMapper(); @@ -39,12 +39,12 @@ protected LogRecordData protoToSignalItem( } @Override - protected List getProtoResources(LogsData logsData) { + protected List getProtoResources(ExportLogsServiceRequest logsData) { return logsData.resource_logs; } @Override - protected LogsData createProtoData( + protected ExportLogsServiceRequest createProtoData( Map>> itemsByResource) { List items = new ArrayList<>(); itemsByResource.forEach( @@ -58,7 +58,7 @@ protected LogsData createProtoData( } items.add(resourceLogsBuilder.build()); }); - return new LogsData.Builder().resource_logs(items).build(); + return new ExportLogsServiceRequest.Builder().resource_logs(items).build(); } private ScopeLogs.Builder createProtoScopeBuilder(InstrumentationScopeInfo scopeInfo) { diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/logs/models/LogRecordDataImpl.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/logs/models/LogRecordDataImpl.java index 9ff0f9410..2979f96fb 100644 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/logs/models/LogRecordDataImpl.java +++ 
b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/logs/models/LogRecordDataImpl.java @@ -6,18 +6,18 @@ package io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.logs.models; import com.google.auto.value.AutoValue; -import com.google.errorprone.annotations.CanIgnoreReturnValue; import io.opentelemetry.api.common.Attributes; import io.opentelemetry.api.common.Value; +import io.opentelemetry.api.incubator.common.ExtendedAttributes; import io.opentelemetry.api.logs.Severity; import io.opentelemetry.api.trace.SpanContext; import io.opentelemetry.sdk.common.InstrumentationScopeInfo; -import io.opentelemetry.sdk.logs.data.LogRecordData; +import io.opentelemetry.sdk.logs.data.internal.ExtendedLogRecordData; import io.opentelemetry.sdk.resources.Resource; import javax.annotation.Nullable; @AutoValue -public abstract class LogRecordDataImpl implements LogRecordData { +public abstract class LogRecordDataImpl implements ExtendedLogRecordData { public static Builder builder() { return new AutoValue_LogRecordDataImpl.Builder(); @@ -31,10 +31,25 @@ public io.opentelemetry.sdk.logs.data.Body getBody() { : io.opentelemetry.sdk.logs.data.Body.string(valueBody.asString()); } + @Override + public ExtendedAttributes getExtendedAttributes() { + return ExtendedAttributes.builder().putAll(getAttributes()).build(); + } + + // It's only deprecated in the incubating interface for extended attributes, which are not yet + // supported in this module. 
+ @SuppressWarnings("deprecation") + @Override + public abstract Attributes getAttributes(); + @Override @Nullable public abstract Value getBodyValue(); + @Override + @Nullable + public abstract String getEventName(); + @AutoValue.Builder public abstract static class Builder { public abstract Builder setResource(Resource value); @@ -51,23 +66,14 @@ public abstract static class Builder { public abstract Builder setSeverityText(String value); - @Deprecated - @CanIgnoreReturnValue - public Builder setBody(io.opentelemetry.sdk.logs.data.Body body) { - if (body.getType() == io.opentelemetry.sdk.logs.data.Body.Type.STRING) { - setBodyValue(Value.of(body.asString())); - } else if (body.getType() == io.opentelemetry.sdk.logs.data.Body.Type.EMPTY) { - setBodyValue(null); - } - return this; - } - public abstract Builder setBodyValue(@Nullable Value value); public abstract Builder setAttributes(Attributes value); public abstract Builder setTotalAttributeCount(Integer value); + public abstract Builder setEventName(String value); + public abstract LogRecordDataImpl build(); } } diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/metrics/MetricDataMapper.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/metrics/MetricDataMapper.java index 1b9f2bf90..46bf32493 100644 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/metrics/MetricDataMapper.java +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/metrics/MetricDataMapper.java @@ -766,8 +766,8 @@ private static Attributes protoToAttributes(List source) { } private static final class DataWithType { - public final Data data; - public final MetricDataType type; + final Data data; + final MetricDataType type; private DataWithType(Data data, MetricDataType type) { this.data = data; diff --git 
a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/metrics/ProtoMetricsDataMapper.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/metrics/ProtoMetricsDataMapper.java index a81ab9957..ad67eee1c 100644 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/metrics/ProtoMetricsDataMapper.java +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/metrics/ProtoMetricsDataMapper.java @@ -6,8 +6,8 @@ package io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.metrics; import io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.common.BaseProtoSignalsDataMapper; +import io.opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest; import io.opentelemetry.proto.metrics.v1.Metric; -import io.opentelemetry.proto.metrics.v1.MetricsData; import io.opentelemetry.proto.metrics.v1.ResourceMetrics; import io.opentelemetry.proto.metrics.v1.ScopeMetrics; import io.opentelemetry.sdk.common.InstrumentationScopeInfo; @@ -19,7 +19,7 @@ public final class ProtoMetricsDataMapper extends BaseProtoSignalsDataMapper< - MetricData, Metric, MetricsData, ResourceMetrics, ScopeMetrics> { + MetricData, Metric, ExportMetricsServiceRequest, ResourceMetrics, ScopeMetrics> { private static final ProtoMetricsDataMapper INSTANCE = new ProtoMetricsDataMapper(); @@ -39,12 +39,12 @@ protected MetricData protoToSignalItem( } @Override - protected List getProtoResources(MetricsData protoData) { + protected List getProtoResources(ExportMetricsServiceRequest protoData) { return protoData.resource_metrics; } @Override - protected MetricsData createProtoData( + protected ExportMetricsServiceRequest createProtoData( Map>> itemsByResource) { List items = new ArrayList<>(); itemsByResource.forEach( @@ -58,7 +58,7 @@ protected MetricsData createProtoData( } 
items.add(resourceMetricsBuilder.build()); }); - return new MetricsData.Builder().resource_metrics(items).build(); + return new ExportMetricsServiceRequest.Builder().resource_metrics(items).build(); } private ScopeMetrics.Builder createProtoScopeBuilder(InstrumentationScopeInfo scopeInfo) { diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/spans/ProtoSpansDataMapper.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/spans/ProtoSpansDataMapper.java index 18acf3a1f..12697c49d 100644 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/spans/ProtoSpansDataMapper.java +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/spans/ProtoSpansDataMapper.java @@ -6,10 +6,10 @@ package io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.spans; import io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.common.BaseProtoSignalsDataMapper; +import io.opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest; import io.opentelemetry.proto.trace.v1.ResourceSpans; import io.opentelemetry.proto.trace.v1.ScopeSpans; import io.opentelemetry.proto.trace.v1.Span; -import io.opentelemetry.proto.trace.v1.TracesData; import io.opentelemetry.sdk.common.InstrumentationScopeInfo; import io.opentelemetry.sdk.resources.Resource; import io.opentelemetry.sdk.trace.data.SpanData; @@ -18,7 +18,8 @@ import java.util.Map; public final class ProtoSpansDataMapper - extends BaseProtoSignalsDataMapper { + extends BaseProtoSignalsDataMapper< + SpanData, Span, ExportTraceServiceRequest, ResourceSpans, ScopeSpans> { private static final ProtoSpansDataMapper INSTANCE = new ProtoSpansDataMapper(); @@ -32,7 +33,7 @@ protected Span signalItemToProto(SpanData sourceData) { } @Override - protected List getProtoResources(TracesData protoData) { + 
protected List getProtoResources(ExportTraceServiceRequest protoData) { return protoData.resource_spans; } @@ -43,7 +44,7 @@ protected SpanData protoToSignalItem( } @Override - protected TracesData createProtoData( + protected ExportTraceServiceRequest createProtoData( Map>> itemsByResource) { List items = new ArrayList<>(); itemsByResource.forEach( @@ -57,7 +58,7 @@ protected TracesData createProtoData( } items.add(resourceSpansBuilder.build()); }); - return new TracesData.Builder().resource_spans(items).build(); + return new ExportTraceServiceRequest.Builder().resource_spans(items).build(); } @Override diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/serializers/LogRecordDataSerializer.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/serializers/LogRecordDataSerializer.java index 72c654ffe..19bb1cf93 100644 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/serializers/LogRecordDataSerializer.java +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/serializers/LogRecordDataSerializer.java @@ -5,33 +5,41 @@ package io.opentelemetry.contrib.disk.buffering.internal.serialization.serializers; -import io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.logs.ProtoLogsDataMapper; +import com.google.errorprone.annotations.CanIgnoreReturnValue; import io.opentelemetry.contrib.disk.buffering.internal.utils.ProtobufTools; -import io.opentelemetry.proto.logs.v1.LogsData; +import io.opentelemetry.exporter.internal.otlp.logs.LowAllocationLogsRequestMarshaler; import io.opentelemetry.sdk.logs.data.LogRecordData; -import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.io.OutputStream; import java.util.Collection; public final class LogRecordDataSerializer implements SignalSerializer { - private static final LogRecordDataSerializer INSTANCE = 
new LogRecordDataSerializer(); - private LogRecordDataSerializer() {} + private final LowAllocationLogsRequestMarshaler marshaler = + new LowAllocationLogsRequestMarshaler(); - static LogRecordDataSerializer getInstance() { - return INSTANCE; + LogRecordDataSerializer() {} + + @CanIgnoreReturnValue + @Override + public LogRecordDataSerializer initialize(Collection data) { + marshaler.initialize(data); + return this; + } + + @Override + public void writeBinaryTo(OutputStream output) throws IOException { + ProtobufTools.writeRawVarint32(marshaler.getBinarySerializedSize(), output); + marshaler.writeBinaryTo(output); + } + + @Override + public int getBinarySerializedSize() { + return marshaler.getBinarySerializedSize(); } @Override - public byte[] serialize(Collection logRecordData) { - LogsData proto = ProtoLogsDataMapper.getInstance().toProto(logRecordData); - try (ByteArrayOutputStream out = new ByteArrayOutputStream()) { - int size = LogsData.ADAPTER.encodedSize(proto); - ProtobufTools.writeRawVarint32(size, out); - proto.encode(out); - return out.toByteArray(); - } catch (IOException e) { - throw new IllegalStateException(e); - } + public void reset() { + marshaler.reset(); } } diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/serializers/MetricDataSerializer.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/serializers/MetricDataSerializer.java index 077d4ade5..726b3185d 100644 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/serializers/MetricDataSerializer.java +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/serializers/MetricDataSerializer.java @@ -5,33 +5,41 @@ package io.opentelemetry.contrib.disk.buffering.internal.serialization.serializers; -import io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.metrics.ProtoMetricsDataMapper; 
+import com.google.errorprone.annotations.CanIgnoreReturnValue; import io.opentelemetry.contrib.disk.buffering.internal.utils.ProtobufTools; -import io.opentelemetry.proto.metrics.v1.MetricsData; +import io.opentelemetry.exporter.internal.otlp.metrics.LowAllocationMetricsRequestMarshaler; import io.opentelemetry.sdk.metrics.data.MetricData; -import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.io.OutputStream; import java.util.Collection; public final class MetricDataSerializer implements SignalSerializer { - private static final MetricDataSerializer INSTANCE = new MetricDataSerializer(); - private MetricDataSerializer() {} + private final LowAllocationMetricsRequestMarshaler marshaler = + new LowAllocationMetricsRequestMarshaler(); - static MetricDataSerializer getInstance() { - return INSTANCE; + MetricDataSerializer() {} + + @CanIgnoreReturnValue + @Override + public MetricDataSerializer initialize(Collection data) { + marshaler.initialize(data); + return this; + } + + @Override + public void writeBinaryTo(OutputStream output) throws IOException { + ProtobufTools.writeRawVarint32(marshaler.getBinarySerializedSize(), output); + marshaler.writeBinaryTo(output); + } + + @Override + public int getBinarySerializedSize() { + return marshaler.getBinarySerializedSize(); } @Override - public byte[] serialize(Collection metricData) { - MetricsData proto = ProtoMetricsDataMapper.getInstance().toProto(metricData); - try (ByteArrayOutputStream out = new ByteArrayOutputStream()) { - int size = MetricsData.ADAPTER.encodedSize(proto); - ProtobufTools.writeRawVarint32(size, out); - proto.encode(out); - return out.toByteArray(); - } catch (IOException e) { - throw new IllegalStateException(e); - } + public void reset() { + marshaler.reset(); } } diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/serializers/SignalSerializer.java 
b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/serializers/SignalSerializer.java index c7d7e5c8c..4c306ceb7 100644 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/serializers/SignalSerializer.java +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/serializers/SignalSerializer.java @@ -8,21 +8,29 @@ import io.opentelemetry.sdk.logs.data.LogRecordData; import io.opentelemetry.sdk.metrics.data.MetricData; import io.opentelemetry.sdk.trace.data.SpanData; +import java.io.IOException; +import java.io.OutputStream; import java.util.Collection; public interface SignalSerializer { static SignalSerializer ofSpans() { - return SpanDataSerializer.getInstance(); + return new SpanDataSerializer(); } static SignalSerializer ofMetrics() { - return MetricDataSerializer.getInstance(); + return new MetricDataSerializer(); } static SignalSerializer ofLogs() { - return LogRecordDataSerializer.getInstance(); + return new LogRecordDataSerializer(); } - byte[] serialize(Collection items); + SignalSerializer initialize(Collection data); + + void writeBinaryTo(OutputStream output) throws IOException; + + int getBinarySerializedSize(); + + void reset(); } diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/serializers/SpanDataSerializer.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/serializers/SpanDataSerializer.java index 5a26426db..6e3276231 100644 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/serializers/SpanDataSerializer.java +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/serializers/SpanDataSerializer.java @@ -5,33 +5,41 @@ package io.opentelemetry.contrib.disk.buffering.internal.serialization.serializers; -import 
io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.spans.ProtoSpansDataMapper; +import com.google.errorprone.annotations.CanIgnoreReturnValue; import io.opentelemetry.contrib.disk.buffering.internal.utils.ProtobufTools; -import io.opentelemetry.proto.trace.v1.TracesData; +import io.opentelemetry.exporter.internal.otlp.traces.LowAllocationTraceRequestMarshaler; import io.opentelemetry.sdk.trace.data.SpanData; -import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.io.OutputStream; import java.util.Collection; public final class SpanDataSerializer implements SignalSerializer { - private static final SpanDataSerializer INSTANCE = new SpanDataSerializer(); - private SpanDataSerializer() {} + private final LowAllocationTraceRequestMarshaler marshaler = + new LowAllocationTraceRequestMarshaler(); - static SpanDataSerializer getInstance() { - return INSTANCE; + SpanDataSerializer() {} + + @CanIgnoreReturnValue + @Override + public SpanDataSerializer initialize(Collection data) { + marshaler.initialize(data); + return this; + } + + @Override + public void writeBinaryTo(OutputStream output) throws IOException { + ProtobufTools.writeRawVarint32(marshaler.getBinarySerializedSize(), output); + marshaler.writeBinaryTo(output); + } + + @Override + public int getBinarySerializedSize() { + return marshaler.getBinarySerializedSize(); } @Override - public byte[] serialize(Collection spanData) { - TracesData proto = ProtoSpansDataMapper.getInstance().toProto(spanData); - try (ByteArrayOutputStream out = new ByteArrayOutputStream()) { - int size = TracesData.ADAPTER.encodedSize(proto); - ProtobufTools.writeRawVarint32(size, out); - proto.encode(out); - return out.toByteArray(); - } catch (IOException e) { - throw new IllegalStateException(e); - } + public void reset() { + marshaler.reset(); } } diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/FileSignalStorage.java 
b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/FileSignalStorage.java new file mode 100644 index 000000000..743dee757 --- /dev/null +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/FileSignalStorage.java @@ -0,0 +1,92 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.disk.buffering.internal.storage; + +import io.opentelemetry.contrib.disk.buffering.internal.serialization.deserializers.SignalDeserializer; +import io.opentelemetry.contrib.disk.buffering.internal.serialization.serializers.SignalSerializer; +import io.opentelemetry.contrib.disk.buffering.storage.SignalStorage; +import io.opentelemetry.contrib.disk.buffering.storage.result.WriteResult; +import java.io.IOException; +import java.util.Collection; +import java.util.Iterator; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import javax.annotation.concurrent.GuardedBy; + +/** Default storage implementation where items are stored in multiple protobuf files. 
*/ +public final class FileSignalStorage implements SignalStorage { + private final Storage storage; + private final SignalSerializer serializer; + private final SignalDeserializer deserializer; + private final Logger logger = Logger.getLogger(FileSignalStorage.class.getName()); + private final AtomicBoolean isClosed = new AtomicBoolean(false); + private final Object iteratorLock = new Object(); + + @GuardedBy("iteratorLock") + @Nullable + private Iterator> iterator; + + public FileSignalStorage( + Storage storage, SignalSerializer serializer, SignalDeserializer deserializer) { + this.storage = storage; + this.serializer = serializer; + this.deserializer = deserializer; + } + + @Override + public CompletableFuture write(Collection items) { + logger.finer("Intercepting batch."); + try { + serializer.initialize(items); + if (storage.write(serializer)) { + return CompletableFuture.completedFuture(WriteResult.successful()); + } + logger.fine("Could not store batch in disk."); + return CompletableFuture.completedFuture( + WriteResult.error(new Exception("Could not store batch in disk for an unknown reason."))); + } catch (IOException e) { + logger.log( + Level.WARNING, + "An unexpected error happened while attempting to write the data in disk.", + e); + return CompletableFuture.completedFuture(WriteResult.error(e)); + } finally { + serializer.reset(); + } + } + + @Override + public CompletableFuture clear() { + try { + storage.clear(); + return CompletableFuture.completedFuture(WriteResult.successful()); + } catch (IOException e) { + return CompletableFuture.completedFuture(WriteResult.error(e)); + } + } + + @Override + public void close() throws IOException { + if (isClosed.compareAndSet(false, true)) { + storage.close(); + } + } + + @Nonnull + @Override + public Iterator> iterator() { + synchronized (iteratorLock) { + if (iterator == null) { + iterator = new StorageIterator<>(storage, deserializer); + } + return iterator; + } + } +} diff --git 
a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/FolderManager.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/FolderManager.java index 0f3ad7ad7..d93751fa1 100644 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/FolderManager.java +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/FolderManager.java @@ -7,29 +7,50 @@ import static io.opentelemetry.contrib.disk.buffering.internal.storage.util.ClockBuddy.nowMillis; -import io.opentelemetry.contrib.disk.buffering.config.StorageConfiguration; import io.opentelemetry.contrib.disk.buffering.internal.storage.files.ReadableFile; import io.opentelemetry.contrib.disk.buffering.internal.storage.files.WritableFile; +import io.opentelemetry.contrib.disk.buffering.storage.impl.FileStorageConfiguration; import io.opentelemetry.sdk.common.Clock; +import java.io.Closeable; import java.io.File; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import java.util.Objects; import javax.annotation.Nullable; import org.jetbrains.annotations.NotNull; -public final class FolderManager { +public final class FolderManager implements Closeable { private final File folder; private final Clock clock; - private final StorageConfiguration configuration; + private final FileStorageConfiguration configuration; @Nullable private ReadableFile currentReadableFile; @Nullable private WritableFile currentWritableFile; - public FolderManager(File folder, StorageConfiguration configuration, Clock clock) { + public static FolderManager create( + File destinationDir, FileStorageConfiguration configuration, Clock clock) { + if (destinationDir.isFile()) { + throw new IllegalArgumentException("destinationDir must be a directory"); + } + if (!destinationDir.exists()) { + if (!destinationDir.mkdirs()) { + throw new IllegalStateException("Could not create dir: " + 
destinationDir); + } + } + return new FolderManager(destinationDir, configuration, clock); + } + + public FolderManager(File folder, FileStorageConfiguration configuration, Clock clock) { this.folder = folder; this.configuration = configuration; this.clock = clock; } + @Override + public void close() throws IOException { + closeCurrentFiles(); + } + @Nullable public synchronized ReadableFile getReadableFile() throws IOException { currentReadableFile = null; @@ -57,6 +78,21 @@ public synchronized WritableFile createWritableFile() throws IOException { return currentWritableFile; } + public synchronized void clear() throws IOException { + closeCurrentFiles(); + List undeletedFiles = new ArrayList<>(); + + for (File file : Objects.requireNonNull(folder.listFiles())) { + if (!file.delete()) { + undeletedFiles.add(file); + } + } + + if (!undeletedFiles.isEmpty()) { + throw new IOException("Could not delete files " + undeletedFiles); + } + } + @Nullable private File findReadableFile() throws IOException { long currentTime = nowMillis(clock); @@ -141,4 +177,13 @@ private boolean hasExpiredForReading(long systemCurrentTimeMillis, long createdT return systemCurrentTimeMillis > (createdTimeInMillis + configuration.getMaxFileAgeForReadMillis()); } + + private synchronized void closeCurrentFiles() throws IOException { + if (currentReadableFile != null) { + currentReadableFile.close(); + } + if (currentWritableFile != null) { + currentWritableFile.close(); + } + } } diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/Storage.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/Storage.java index 4ff60cbdc..6b2ee05f7 100644 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/Storage.java +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/Storage.java @@ -7,122 +7,187 @@ import static 
java.util.logging.Level.WARNING; -import io.opentelemetry.contrib.disk.buffering.internal.exporter.FromDiskExporterImpl; +import io.opentelemetry.contrib.disk.buffering.internal.serialization.deserializers.DeserializationException; +import io.opentelemetry.contrib.disk.buffering.internal.serialization.deserializers.SignalDeserializer; +import io.opentelemetry.contrib.disk.buffering.internal.serialization.serializers.SignalSerializer; import io.opentelemetry.contrib.disk.buffering.internal.storage.files.ReadableFile; import io.opentelemetry.contrib.disk.buffering.internal.storage.files.WritableFile; -import io.opentelemetry.contrib.disk.buffering.internal.storage.files.reader.ProcessResult; import io.opentelemetry.contrib.disk.buffering.internal.storage.responses.ReadableResult; import io.opentelemetry.contrib.disk.buffering.internal.storage.responses.WritableResult; -import io.opentelemetry.contrib.disk.buffering.internal.utils.DebugLogger; import java.io.Closeable; import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Function; +import java.util.concurrent.atomic.AtomicReference; import java.util.logging.Logger; import javax.annotation.Nullable; -public final class Storage implements Closeable { +public final class Storage implements Closeable { private static final int MAX_ATTEMPTS = 3; - private final DebugLogger logger; - + private final Logger logger = Logger.getLogger(Storage.class.getName()); private final FolderManager folderManager; private final AtomicBoolean isClosed = new AtomicBoolean(false); - @Nullable private WritableFile writableFile; - @Nullable private ReadableFile readableFile; + private final AtomicBoolean activeReadResultAvailable = new AtomicBoolean(false); + private final AtomicReference writableFileRef = new AtomicReference<>(); + private final AtomicReference readableFileRef = new AtomicReference<>(); - public 
Storage(FolderManager folderManager, boolean debugEnabled) { + public Storage(FolderManager folderManager) { this.folderManager = folderManager; - this.logger = - DebugLogger.wrap(Logger.getLogger(FromDiskExporterImpl.class.getName()), debugEnabled); - } - - public static StorageBuilder builder() { - return new StorageBuilder(); } /** * Attempts to write an item into a writable file. * - * @param item - The data that would be appended to the file. + * @param marshaler - The data that would be appended to the file. * @throws IOException If an unexpected error happens. */ - public boolean write(byte[] item) throws IOException { - return write(item, 1); + public boolean write(SignalSerializer marshaler) throws IOException { + return write(marshaler, 1); } - private boolean write(byte[] item, int attemptNumber) throws IOException { + private boolean write(SignalSerializer marshaler, int attemptNumber) throws IOException { if (isClosed.get()) { - logger.log("Refusing to write to storage after being closed."); + logger.fine("Refusing to write to storage after being closed."); return false; } if (attemptNumber > MAX_ATTEMPTS) { - logger.log("Max number of attempts to write buffered data exceeded.", WARNING); + logger.log(WARNING, "Max number of attempts to write buffered data exceeded."); return false; } + WritableFile writableFile = writableFileRef.get(); if (writableFile == null) { writableFile = folderManager.createWritableFile(); - logger.log("Created new writableFile: " + writableFile); + writableFileRef.set(writableFile); + logger.finer("Created new writableFile: " + writableFile); } - WritableResult result = writableFile.append(item); + WritableResult result = writableFile.append(marshaler); if (result != WritableResult.SUCCEEDED) { // Retry with new file - writableFile = null; - return write(item, ++attemptNumber); + writableFileRef.set(null); + return write(marshaler, ++attemptNumber); } return true; } + public void flush() throws IOException { + WritableFile 
writableFile = writableFileRef.get(); + if (writableFile != null) { + writableFile.flush(); + } else { + logger.info("No writable file to flush."); + } + } + /** * Attempts to read an item from a ready-to-read file. * - * @param processing Is passed over to {@link ReadableFile#readAndProcess(Function)}. * @throws IOException If an unexpected error happens. */ - public ReadableResult readAndProcess(Function processing) - throws IOException { - return readAndProcess(processing, 1); + @Nullable + public ReadableResult readNext(SignalDeserializer deserializer) throws IOException { + if (activeReadResultAvailable.get()) { + throw new IllegalStateException( + "You must close any previous ReadableResult before requesting a new one"); + } + return doReadNext(deserializer, 1); } - private ReadableResult readAndProcess( - Function processing, int attemptNumber) throws IOException { + @Nullable + private ReadableResult doReadNext(SignalDeserializer deserializer, int attemptNumber) + throws IOException { if (isClosed.get()) { - logger.log("Refusing to read from storage after being closed."); - return ReadableResult.FAILED; + logger.fine("Refusing to read from storage after being closed."); + return null; } if (attemptNumber > MAX_ATTEMPTS) { - logger.log("Maximum number of attempts to read and process buffered data exceeded.", WARNING); - return ReadableResult.FAILED; + logger.log(WARNING, "Maximum number of attempts to read buffered data exceeded."); + return null; } + ReadableFile readableFile = readableFileRef.get(); if (readableFile == null) { - logger.log("Obtaining a new readableFile from the folderManager."); + logger.finer("Obtaining a new readableFile from the folderManager."); readableFile = folderManager.getReadableFile(); + readableFileRef.set(readableFile); if (readableFile == null) { - logger.log("Unable to get or create readable file."); - return ReadableResult.FAILED; + logger.fine("Unable to get or create readable file."); + return null; } } - 
logger.log("Attempting to read data from " + readableFile); - ReadableResult result = readableFile.readAndProcess(processing); - switch (result) { - case SUCCEEDED: - case TRY_LATER: - return result; - default: - // Retry with new file - readableFile = null; - return readAndProcess(processing, ++attemptNumber); + + logger.finer("Attempting to read data from " + readableFile); + byte[] result = readableFile.readNext(); + if (result != null) { + try { + List items = deserializer.deserialize(result); + activeReadResultAvailable.set(true); + return new FileReadResult(items, readableFile); + } catch (DeserializationException e) { + // Data corrupted, clear file. + readableFile.clear(); + } } + + // Retry with new file + readableFileRef.set(null); + return doReadNext(deserializer, ++attemptNumber); + } + + public void clear() throws IOException { + folderManager.clear(); + } + + public boolean isClosed() { + return isClosed.get(); } @Override public void close() throws IOException { - logger.log("Closing disk buffering storage."); + logger.fine("Closing disk buffering storage."); if (isClosed.compareAndSet(false, true)) { - if (writableFile != null) { - writableFile.close(); + folderManager.close(); + writableFileRef.set(null); + readableFileRef.set(null); + } + } + + class FileReadResult implements ReadableResult { + private final Collection content; + private final AtomicBoolean itemDeleted = new AtomicBoolean(false); + private final AtomicBoolean closed = new AtomicBoolean(false); + private final AtomicReference readableFile = new AtomicReference<>(); + + FileReadResult(Collection content, ReadableFile readableFile) { + this.content = content; + this.readableFile.set(readableFile); + } + + @Override + public Collection getContent() { + return content; + } + + @Override + public void delete() throws IOException { + if (closed.get()) { + return; } - if (readableFile != null) { - readableFile.close(); + if (itemDeleted.compareAndSet(false, true)) { + try { + 
Objects.requireNonNull(readableFile.get()).removeTopItem(); + } catch (IOException e) { + itemDeleted.set(false); + throw e; + } + } + } + + @Override + public void close() throws IOException { + if (closed.compareAndSet(false, true)) { + activeReadResultAvailable.set(false); + readableFile.set(null); } } } diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/StorageBuilder.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/StorageBuilder.java deleted file mode 100644 index c8b0435ca..000000000 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/StorageBuilder.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.disk.buffering.internal.storage; - -import com.google.errorprone.annotations.CanIgnoreReturnValue; -import io.opentelemetry.contrib.disk.buffering.config.StorageConfiguration; -import io.opentelemetry.sdk.common.Clock; -import java.io.File; -import java.io.IOException; -import java.util.logging.Level; -import java.util.logging.Logger; - -public class StorageBuilder { - - private static final Logger logger = Logger.getLogger(StorageBuilder.class.getName()); - - private String folderName = "data"; - private StorageConfiguration configuration = StorageConfiguration.getDefault(new File(".")); - private Clock clock = Clock.getDefault(); - - StorageBuilder() {} - - @CanIgnoreReturnValue - public StorageBuilder setFolderName(String folderName) { - this.folderName = folderName; - return this; - } - - @CanIgnoreReturnValue - public StorageBuilder setStorageConfiguration(StorageConfiguration configuration) { - this.configuration = configuration; - return this; - } - - @CanIgnoreReturnValue - public StorageBuilder setStorageClock(Clock clock) { - this.clock = clock; - return this; - } - - public Storage build() throws IOException { - 
File folder = ensureSubdir(configuration.getRootDir(), folderName); - FolderManager folderManager = new FolderManager(folder, configuration, clock); - if (configuration.isDebugEnabled()) { - logger.log(Level.INFO, "Building storage with configuration => " + configuration); - } - return new Storage(folderManager, configuration.isDebugEnabled()); - } - - private static File ensureSubdir(File rootDir, String child) throws IOException { - File subdir = new File(rootDir, child); - if (subdir.exists() || subdir.mkdirs()) { - return subdir; - } - throw new IOException("Could not create the subdir: '" + child + "' inside: " + rootDir); - } -} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/StorageIterator.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/StorageIterator.java new file mode 100644 index 000000000..871238b03 --- /dev/null +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/StorageIterator.java @@ -0,0 +1,90 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.disk.buffering.internal.storage; + +import io.opentelemetry.contrib.disk.buffering.internal.serialization.deserializers.SignalDeserializer; +import io.opentelemetry.contrib.disk.buffering.internal.storage.responses.ReadableResult; +import java.io.IOException; +import java.util.Collection; +import java.util.Iterator; +import java.util.Objects; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.annotation.Nullable; +import javax.annotation.concurrent.GuardedBy; + +final class StorageIterator implements Iterator> { + private final Storage storage; + private final SignalDeserializer deserializer; + private final Logger logger = Logger.getLogger(StorageIterator.class.getName()); + + @GuardedBy("this") + @Nullable + private ReadableResult currentResult; + + @GuardedBy("this") 
+ private boolean currentResultConsumed = false; + + StorageIterator(Storage storage, SignalDeserializer deserializer) { + this.storage = storage; + this.deserializer = deserializer; + } + + @Override + public synchronized boolean hasNext() { + if (storage.isClosed()) { + return false; + } + return findNext(); + } + + @Override + @Nullable + public synchronized Collection next() { + if (storage.isClosed()) { + return null; + } + if (findNext()) { + currentResultConsumed = true; + return Objects.requireNonNull(currentResult).getContent(); + } + return null; + } + + @Override + public synchronized void remove() { + if (currentResult != null) { + try { + currentResult.delete(); + } catch (IOException e) { + logger.log(Level.SEVERE, "Error deleting stored item", e); + } + } + } + + private synchronized boolean findNext() { + try { + if (currentResult != null) { + if (!currentResultConsumed) { + return true; + } + currentResult.delete(); + currentResult.close(); + currentResult = null; + } + + currentResultConsumed = false; + ReadableResult result = storage.readNext(deserializer); + if (result != null) { + currentResult = result; + return true; + } + } catch (IOException e) { + logger.log(Level.SEVERE, "Error reading from storage", e); + } + return false; + } +} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/FileOperations.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/FileOperations.java index 21544a991..316f51157 100644 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/FileOperations.java +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/FileOperations.java @@ -9,15 +9,9 @@ import java.io.File; public interface FileOperations extends Closeable { - long getSize(); - boolean hasExpired(); boolean isClosed(); File getFile(); - - default String getFileName() { - return 
getFile().getName(); - } } diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/ReadableFile.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/ReadableFile.java index f7383d60f..59429d187 100644 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/ReadableFile.java +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/ReadableFile.java @@ -7,24 +7,17 @@ import static io.opentelemetry.contrib.disk.buffering.internal.storage.util.ClockBuddy.nowMillis; -import io.opentelemetry.contrib.disk.buffering.config.StorageConfiguration; import io.opentelemetry.contrib.disk.buffering.internal.storage.files.reader.DelimitedProtoStreamReader; -import io.opentelemetry.contrib.disk.buffering.internal.storage.files.reader.ProcessResult; -import io.opentelemetry.contrib.disk.buffering.internal.storage.files.reader.ReadResult; import io.opentelemetry.contrib.disk.buffering.internal.storage.files.reader.StreamReader; -import io.opentelemetry.contrib.disk.buffering.internal.storage.files.utils.FileTransferUtil; -import io.opentelemetry.contrib.disk.buffering.internal.storage.responses.ReadableResult; +import io.opentelemetry.contrib.disk.buffering.internal.storage.files.utils.FileStream; +import io.opentelemetry.contrib.disk.buffering.storage.impl.FileStorageConfiguration; import io.opentelemetry.sdk.common.Clock; import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; +import javax.annotation.Nonnull; import javax.annotation.Nullable; -import org.jetbrains.annotations.NotNull; /** * Reads from a file and updates it in parallel in order to avoid re-reading the same items later. @@ -36,19 +29,15 @@ *

More information on the overall storage process in the CONTRIBUTING.md file. */ public final class ReadableFile implements FileOperations { - @NotNull private final File file; - private final int originalFileSize; + @Nonnull private final File file; + private final FileStream fileStream; private final StreamReader reader; - private final FileTransferUtil fileTransferUtil; - private final File temporaryFile; private final Clock clock; private final long expireTimeMillis; private final AtomicBoolean isClosed = new AtomicBoolean(false); - private int readBytes = 0; - @Nullable private ReadResult unconsumedResult; public ReadableFile( - File file, long createdTimeMillis, Clock clock, StorageConfiguration configuration) + File file, long createdTimeMillis, Clock clock, FileStorageConfiguration configuration) throws IOException { this( file, @@ -59,82 +48,38 @@ public ReadableFile( } public ReadableFile( - File file, + @Nonnull File file, long createdTimeMillis, Clock clock, - StorageConfiguration configuration, + FileStorageConfiguration configuration, StreamReader.Factory readerFactory) throws IOException { this.file = file; this.clock = clock; expireTimeMillis = createdTimeMillis + configuration.getMaxFileAgeForReadMillis(); - originalFileSize = (int) file.length(); - temporaryFile = configuration.getTemporaryFileProvider().createTemporaryFile(file.getName()); - copyFile(file, temporaryFile); - FileInputStream tempInputStream = new FileInputStream(temporaryFile); - fileTransferUtil = new FileTransferUtil(tempInputStream, file); - reader = readerFactory.create(tempInputStream); + fileStream = FileStream.create(file); + reader = readerFactory.create(fileStream); } /** * Reads the next line available in the file and provides it to a {@link Function processing} * which will determine whether to remove the provided line or not. - * - * @param processing - A function that receives the line that has been read and returns a boolean. 
- * If the processing function returns TRUE, then the provided line will be deleted from the - * source file. If the function returns FALSE, no changes will be applied to the source file. */ - public synchronized ReadableResult readAndProcess(Function processing) - throws IOException { + @Nullable + public synchronized byte[] readNext() throws IOException { if (isClosed.get()) { - return ReadableResult.FAILED; + return null; } if (hasExpired()) { close(); - return ReadableResult.FAILED; - } - ReadResult read = readNextItem(); - if (read == null) { - cleanUp(); - return ReadableResult.FAILED; - } - switch (processing.apply(read.content)) { - case SUCCEEDED: - unconsumedResult = null; - readBytes += read.totalReadLength; - int amountOfBytesToTransfer = originalFileSize - readBytes; - if (amountOfBytesToTransfer > 0) { - fileTransferUtil.transferBytes(readBytes, amountOfBytesToTransfer); - } else { - cleanUp(); - } - return ReadableResult.SUCCEEDED; - case TRY_LATER: - unconsumedResult = read; - return ReadableResult.TRY_LATER; - case CONTENT_INVALID: - cleanUp(); - return ReadableResult.FAILED; + return null; } - return ReadableResult.FAILED; - } - - @Nullable - private ReadResult readNextItem() throws IOException { - if (unconsumedResult != null) { - return unconsumedResult; + byte[] resultBytes = reader.readNext(); + if (resultBytes == null) { + clear(); + return null; } - return reader.read(); - } - - private void cleanUp() throws IOException { - file.delete(); - close(); - } - - @Override - public long getSize() { - return originalFileSize; + return resultBytes; } @Override @@ -147,35 +92,30 @@ public synchronized boolean isClosed() { return isClosed.get(); } - @NotNull + @Nonnull @Override public File getFile() { return file; } - @Override - public synchronized void close() throws IOException { - if (isClosed.compareAndSet(false, true)) { - unconsumedResult = null; - fileTransferUtil.close(); - reader.close(); - temporaryFile.delete(); + public synchronized void 
clear() throws IOException { + close(); + if (!file.delete()) { + throw new IOException("Could not delete file: " + file); } } - /** - * This is needed instead of using Files.copy in order to keep it compatible with Android api < - * 26. - */ - private static void copyFile(File from, File to) throws IOException { - try (InputStream in = new FileInputStream(from); - OutputStream out = new FileOutputStream(to)) { + public synchronized void removeTopItem() throws IOException { + fileStream.truncateTop(); + if (fileStream.size() == 0) { + clear(); + } + } - byte[] buffer = new byte[1024]; - int lengthRead; - while ((lengthRead = in.read(buffer)) > 0) { - out.write(buffer, 0, lengthRead); - } + @Override + public synchronized void close() throws IOException { + if (isClosed.compareAndSet(false, true)) { + reader.close(); } } diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/WritableFile.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/WritableFile.java index 519e9da66..325d30856 100644 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/WritableFile.java +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/WritableFile.java @@ -7,8 +7,9 @@ import static io.opentelemetry.contrib.disk.buffering.internal.storage.util.ClockBuddy.nowMillis; -import io.opentelemetry.contrib.disk.buffering.config.StorageConfiguration; +import io.opentelemetry.contrib.disk.buffering.internal.serialization.serializers.SignalSerializer; import io.opentelemetry.contrib.disk.buffering.internal.storage.responses.WritableResult; +import io.opentelemetry.contrib.disk.buffering.storage.impl.FileStorageConfiguration; import io.opentelemetry.sdk.common.Clock; import java.io.File; import java.io.FileOutputStream; @@ -20,7 +21,7 @@ public final class WritableFile implements FileOperations { private final File 
file; - private final StorageConfiguration configuration; + private final FileStorageConfiguration configuration; private final Clock clock; private final long expireTimeMillis; private final OutputStream out; @@ -28,7 +29,7 @@ public final class WritableFile implements FileOperations { private int size; public WritableFile( - File file, long createdTimeMillis, StorageConfiguration configuration, Clock clock) + File file, long createdTimeMillis, FileStorageConfiguration configuration, Clock clock) throws IOException { this.file = file; this.configuration = configuration; @@ -43,9 +44,9 @@ public WritableFile( * reached the configured max size, the file stream is closed with the contents available in the * buffer before attempting to append the new data. * - * @param data - The new data line to add. + * @param marshaler - The new data line to add. */ - public synchronized WritableResult append(byte[] data) throws IOException { + public synchronized WritableResult append(SignalSerializer marshaler) throws IOException { if (isClosed.get()) { return WritableResult.FAILED; } @@ -53,17 +54,16 @@ public synchronized WritableResult append(byte[] data) throws IOException { close(); return WritableResult.FAILED; } - int futureSize = size + data.length; + int futureSize = size + marshaler.getBinarySerializedSize(); if (futureSize > configuration.getMaxFileSize()) { close(); return WritableResult.FAILED; } - out.write(data); + marshaler.writeBinaryTo(out); size = futureSize; return WritableResult.SUCCEEDED; } - @Override public synchronized long getSize() { return size; } @@ -94,4 +94,8 @@ public synchronized void close() throws IOException { public String toString() { return "WritableFile{" + "file=" + file + '}'; } + + public void flush() throws IOException { + out.flush(); + } } diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/reader/DelimitedProtoStreamReader.java 
b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/reader/DelimitedProtoStreamReader.java index 0f9723c4c..d638c118b 100644 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/reader/DelimitedProtoStreamReader.java +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/reader/DelimitedProtoStreamReader.java @@ -5,33 +5,36 @@ package io.opentelemetry.contrib.disk.buffering.internal.storage.files.reader; -import io.opentelemetry.contrib.disk.buffering.internal.storage.files.utils.CountingInputStream; import io.opentelemetry.contrib.disk.buffering.internal.utils.ProtobufTools; import java.io.IOException; import java.io.InputStream; import javax.annotation.Nullable; -public final class DelimitedProtoStreamReader extends StreamReader { - private final CountingInputStream countingInputStream; +public final class DelimitedProtoStreamReader implements StreamReader { + private final InputStream inputStream; public DelimitedProtoStreamReader(InputStream inputStream) { - super(new CountingInputStream(inputStream)); - countingInputStream = (CountingInputStream) this.inputStream; + this.inputStream = inputStream; } @Override @Nullable - public ReadResult read() throws IOException { - int startingPosition = countingInputStream.getPosition(); + public byte[] readNext() throws IOException { int itemSize = getNextItemSize(); if (itemSize < 1) { return null; } byte[] bytes = new byte[itemSize]; - if (inputStream.read(bytes) < 0) { - return null; + int offset = 0; + int readCt; + do { + readCt = inputStream.read(bytes, offset, itemSize - offset); + offset += readCt; + } while (readCt != -1 && offset < itemSize); + if (offset != itemSize) { + return null; // unable to read the whole item correctly } - return new ReadResult(bytes, countingInputStream.getPosition() - startingPosition); + return bytes; } private int getNextItemSize() { @@ -46,6 +49,11 @@ 
private int getNextItemSize() { } } + @Override + public void close() throws IOException { + inputStream.close(); + } + public static class Factory implements StreamReader.Factory { private static final Factory INSTANCE = new DelimitedProtoStreamReader.Factory(); @@ -57,8 +65,8 @@ public static Factory getInstance() { private Factory() {} @Override - public StreamReader create(InputStream stream) { - return new DelimitedProtoStreamReader(stream); + public StreamReader create(InputStream inputStream) { + return new DelimitedProtoStreamReader(inputStream); } } } diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/reader/ProcessResult.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/reader/ProcessResult.java deleted file mode 100644 index 696d98d2c..000000000 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/reader/ProcessResult.java +++ /dev/null @@ -1,13 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.disk.buffering.internal.storage.files.reader; - -/** Result of processing the contents of a file. 
*/ -public enum ProcessResult { - SUCCEEDED, - TRY_LATER, - CONTENT_INVALID -} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/reader/ReadResult.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/reader/ReadResult.java deleted file mode 100644 index 079c2396c..000000000 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/reader/ReadResult.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.disk.buffering.internal.storage.files.reader; - -public final class ReadResult { - /** The consumable data. */ - public final byte[] content; - - /** - * The total amount of data read from the stream. This number can be greater than the content - * length as it also takes into account any delimiters size. - */ - public final int totalReadLength; - - public ReadResult(byte[] content, int totalReadLength) { - this.content = content; - this.totalReadLength = totalReadLength; - } -} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/reader/StreamReader.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/reader/StreamReader.java index d263aad71..925422f67 100644 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/reader/StreamReader.java +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/reader/StreamReader.java @@ -10,22 +10,11 @@ import java.io.InputStream; import javax.annotation.Nullable; -public abstract class StreamReader implements Closeable { - protected final InputStream inputStream; - - protected StreamReader(InputStream inputStream) { - this.inputStream = inputStream; - } - +public interface StreamReader extends Closeable { 
@Nullable - public abstract ReadResult read() throws IOException; - - @Override - public void close() throws IOException { - inputStream.close(); - } + byte[] readNext() throws IOException; - public interface Factory { + interface Factory { StreamReader create(InputStream stream); } } diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/utils/CountingInputStream.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/utils/CountingInputStream.java deleted file mode 100644 index 9faa2c018..000000000 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/utils/CountingInputStream.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.disk.buffering.internal.storage.files.utils; - -import java.io.FilterInputStream; -import java.io.IOException; -import java.io.InputStream; - -public final class CountingInputStream extends FilterInputStream { - - private int position; - private int mark = -1; - - public CountingInputStream(InputStream in) { - super(in); - } - - public int getPosition() { - return position; - } - - @Override - public synchronized void mark(int readlimit) { - in.mark(readlimit); - mark = position; - } - - @Override - public long skip(long n) throws IOException { - long result = in.skip(n); - position = (int) (position + result); - return result; - } - - @Override - public int read() throws IOException { - int result = in.read(); - if (result != -1) { - position++; - } - return result; - } - - @Override - public int read(byte[] b, int off, int len) throws IOException { - int result = in.read(b, off, len); - if (result != -1) { - position += result; - } - return result; - } - - @Override - public synchronized void reset() throws IOException { - if (!in.markSupported()) { - throw new IOException("Mark is not 
supported"); - } - if (mark == -1) { - throw new IOException("Mark is not set"); - } - - in.reset(); - position = mark; - } -} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/utils/FileStream.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/utils/FileStream.java new file mode 100644 index 000000000..c49570922 --- /dev/null +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/utils/FileStream.java @@ -0,0 +1,82 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.disk.buffering.internal.storage.files.utils; + +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.RandomAccessFile; +import java.nio.channels.FileChannel; +import org.jetbrains.annotations.NotNull; + +public class FileStream extends InputStream { + private final RandomAccessFile file; + private final FileChannel channel; + + public static FileStream create(File file) throws IOException { + RandomAccessFile randomAccessFile = new RandomAccessFile(file, "rwd"); + FileChannel channel = randomAccessFile.getChannel(); + channel.force(false); + return new FileStream(randomAccessFile, channel); + } + + private FileStream(RandomAccessFile file, FileChannel channel) { + this.file = file; + this.channel = channel; + } + + @Override + public int read() throws IOException { + return file.read(); + } + + @Override + public int read(@NotNull byte[] bytes) throws IOException { + return file.read(bytes); + } + + @Override + public int read(@NotNull byte[] b, int off, int len) throws IOException { + return file.read(b, off, len); + } + + public long size() throws IOException { + return channel.size(); + } + + @Override + public void close() throws IOException { + channel.close(); + file.close(); + } + + public void truncateTop(long size) throws IOException 
{ + file.seek(Math.min(size(), size)); + truncateTop(); + } + + public void truncateTop() throws IOException { + long position = file.getFilePointer(); + if (position == 0) { + return; + } + long remainingSize = size() - position; + if (remainingSize > 0) { + byte[] remainingBytes = new byte[(int) remainingSize]; + file.read(remainingBytes); + file.seek(0); + channel.truncate(remainingSize); + file.write(remainingBytes); + file.seek(0); + } else { + channel.truncate(0); + } + } + + public long getPosition() throws IOException { + return file.getFilePointer(); + } +} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/utils/FileTransferUtil.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/utils/FileTransferUtil.java deleted file mode 100644 index e4729cb53..000000000 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/utils/FileTransferUtil.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.disk.buffering.internal.storage.files.utils; - -import java.io.Closeable; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.nio.channels.FileChannel; - -public final class FileTransferUtil implements Closeable { - private final File output; - - private final FileChannel inputChannel; - - public FileTransferUtil(FileInputStream input, File output) { - this.output = output; - inputChannel = input.getChannel(); - } - - public void transferBytes(int offset, int length) throws IOException { - try (FileOutputStream out = new FileOutputStream(output, false)) { - inputChannel.transferTo(offset, length, out.getChannel()); - } - } - - @Override - public void close() throws IOException { - inputChannel.close(); - } -} diff --git 
a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/responses/ReadableResult.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/responses/ReadableResult.java index 295bc2289..1e2877c7b 100644 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/responses/ReadableResult.java +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/storage/responses/ReadableResult.java @@ -5,8 +5,14 @@ package io.opentelemetry.contrib.disk.buffering.internal.storage.responses; -public enum ReadableResult { - SUCCEEDED, - FAILED, - TRY_LATER +import java.io.Closeable; +import java.io.IOException; +import java.util.Collection; + +public interface ReadableResult extends Closeable { + /** The consumable data. */ + Collection getContent(); + + /** Delete the items provided in {@link #getContent()} */ + void delete() throws IOException; } diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/utils/DebugLogger.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/utils/DebugLogger.java deleted file mode 100644 index 46ff72ebf..000000000 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/utils/DebugLogger.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.disk.buffering.internal.utils; - -import java.util.logging.Level; -import java.util.logging.Logger; - -public class DebugLogger { - private final Logger logger; - private final boolean debugEnabled; - - private DebugLogger(Logger logger, boolean debugEnabled) { - this.logger = logger; - this.debugEnabled = debugEnabled; - } - - public static DebugLogger wrap(Logger logger, boolean debugEnabled) { - return new DebugLogger(logger, debugEnabled); - } - - public void log(String msg) { - log(msg, 
Level.INFO); - } - - public void log(String msg, Level level) { - if (debugEnabled) { - logger.log(level, msg); - } - } - - public void log(String msg, Level level, Throwable e) { - if (debugEnabled) { - logger.log(level, msg, e); - } - } -} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/utils/SignalTypes.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/utils/SignalTypes.java deleted file mode 100644 index c0a7f5765..000000000 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/internal/utils/SignalTypes.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.disk.buffering.internal.utils; - -public enum SignalTypes { - metrics, - spans, - logs -} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/storage/SignalStorage.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/storage/SignalStorage.java new file mode 100644 index 000000000..e8daf391a --- /dev/null +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/storage/SignalStorage.java @@ -0,0 +1,55 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.disk.buffering.storage; + +import io.opentelemetry.contrib.disk.buffering.storage.result.WriteResult; +import io.opentelemetry.sdk.logs.data.LogRecordData; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.trace.data.SpanData; +import java.io.Closeable; +import java.util.Collection; +import java.util.concurrent.CompletableFuture; + +/** + * Allows writing and iterating over written signal items. + * + * @param The type of signal data supported. + */ +public interface SignalStorage extends Iterable>, Closeable { + + /** + * Stores signal items. 
+ * + * @param items The items to be stored. + * @return A future with {@link WriteResult}. + */ + CompletableFuture write(Collection items); + + /** + * Removes all the previously stored items. + * + * @return A future with {@link WriteResult}. + */ + CompletableFuture clear(); + + /** + * Abstraction for Spans. Implementations should use this instead of {@link SignalStorage} + * directly. + */ + interface Span extends SignalStorage {} + + /** + * Abstraction for Logs. Implementations should use this instead of {@link SignalStorage} + * directly. + */ + interface LogRecord extends SignalStorage {} + + /** + * Abstraction for Metrics. Implementations should use this instead of {@link SignalStorage} + * directly. + */ + interface Metric extends SignalStorage {} +} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/storage/impl/FileLogRecordStorage.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/storage/impl/FileLogRecordStorage.java new file mode 100644 index 000000000..71d5d884b --- /dev/null +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/storage/impl/FileLogRecordStorage.java @@ -0,0 +1,63 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.disk.buffering.storage.impl; + +import io.opentelemetry.contrib.disk.buffering.internal.serialization.deserializers.SignalDeserializer; +import io.opentelemetry.contrib.disk.buffering.internal.serialization.serializers.SignalSerializer; +import io.opentelemetry.contrib.disk.buffering.internal.storage.FileSignalStorage; +import io.opentelemetry.contrib.disk.buffering.internal.storage.FolderManager; +import io.opentelemetry.contrib.disk.buffering.internal.storage.Storage; +import io.opentelemetry.contrib.disk.buffering.storage.SignalStorage; +import io.opentelemetry.contrib.disk.buffering.storage.result.WriteResult; +import io.opentelemetry.sdk.common.Clock; +import 
io.opentelemetry.sdk.logs.data.LogRecordData; +import java.io.File; +import java.io.IOException; +import java.util.Collection; +import java.util.Iterator; +import java.util.concurrent.CompletableFuture; +import javax.annotation.Nonnull; + +public final class FileLogRecordStorage implements SignalStorage.LogRecord { + private final FileSignalStorage fileSignalStorage; + + public static FileLogRecordStorage create(File destinationDir) { + return create(destinationDir, FileStorageConfiguration.getDefault()); + } + + public static FileLogRecordStorage create( + File destinationDir, FileStorageConfiguration configuration) { + Storage storage = + new Storage<>(FolderManager.create(destinationDir, configuration, Clock.getDefault())); + return new FileLogRecordStorage( + new FileSignalStorage<>(storage, SignalSerializer.ofLogs(), SignalDeserializer.ofLogs())); + } + + private FileLogRecordStorage(FileSignalStorage fileSignalStorage) { + this.fileSignalStorage = fileSignalStorage; + } + + @Override + public CompletableFuture write(Collection items) { + return fileSignalStorage.write(items); + } + + @Override + public CompletableFuture clear() { + return fileSignalStorage.clear(); + } + + @Override + public void close() throws IOException { + fileSignalStorage.close(); + } + + @Nonnull + @Override + public Iterator> iterator() { + return fileSignalStorage.iterator(); + } +} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/storage/impl/FileMetricStorage.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/storage/impl/FileMetricStorage.java new file mode 100644 index 000000000..8f8b41508 --- /dev/null +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/storage/impl/FileMetricStorage.java @@ -0,0 +1,64 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.disk.buffering.storage.impl; + +import 
io.opentelemetry.contrib.disk.buffering.internal.serialization.deserializers.SignalDeserializer; +import io.opentelemetry.contrib.disk.buffering.internal.serialization.serializers.SignalSerializer; +import io.opentelemetry.contrib.disk.buffering.internal.storage.FileSignalStorage; +import io.opentelemetry.contrib.disk.buffering.internal.storage.FolderManager; +import io.opentelemetry.contrib.disk.buffering.internal.storage.Storage; +import io.opentelemetry.contrib.disk.buffering.storage.SignalStorage; +import io.opentelemetry.contrib.disk.buffering.storage.result.WriteResult; +import io.opentelemetry.sdk.common.Clock; +import io.opentelemetry.sdk.metrics.data.MetricData; +import java.io.File; +import java.io.IOException; +import java.util.Collection; +import java.util.Iterator; +import java.util.concurrent.CompletableFuture; +import javax.annotation.Nonnull; + +public final class FileMetricStorage implements SignalStorage.Metric { + private final FileSignalStorage fileSignalStorage; + + public static FileMetricStorage create(File destinationDir) { + return create(destinationDir, FileStorageConfiguration.getDefault()); + } + + public static FileMetricStorage create( + File destinationDir, FileStorageConfiguration configuration) { + Storage storage = + new Storage<>(FolderManager.create(destinationDir, configuration, Clock.getDefault())); + return new FileMetricStorage( + new FileSignalStorage<>( + storage, SignalSerializer.ofMetrics(), SignalDeserializer.ofMetrics())); + } + + private FileMetricStorage(FileSignalStorage fileSignalStorage) { + this.fileSignalStorage = fileSignalStorage; + } + + @Override + public CompletableFuture write(Collection items) { + return fileSignalStorage.write(items); + } + + @Override + public CompletableFuture clear() { + return fileSignalStorage.clear(); + } + + @Override + public void close() throws IOException { + fileSignalStorage.close(); + } + + @Nonnull + @Override + public Iterator> iterator() { + return 
fileSignalStorage.iterator(); + } +} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/storage/impl/FileSpanStorage.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/storage/impl/FileSpanStorage.java new file mode 100644 index 000000000..b8cfa8996 --- /dev/null +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/storage/impl/FileSpanStorage.java @@ -0,0 +1,63 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.disk.buffering.storage.impl; + +import io.opentelemetry.contrib.disk.buffering.internal.serialization.deserializers.SignalDeserializer; +import io.opentelemetry.contrib.disk.buffering.internal.serialization.serializers.SignalSerializer; +import io.opentelemetry.contrib.disk.buffering.internal.storage.FileSignalStorage; +import io.opentelemetry.contrib.disk.buffering.internal.storage.FolderManager; +import io.opentelemetry.contrib.disk.buffering.internal.storage.Storage; +import io.opentelemetry.contrib.disk.buffering.storage.SignalStorage; +import io.opentelemetry.contrib.disk.buffering.storage.result.WriteResult; +import io.opentelemetry.sdk.common.Clock; +import io.opentelemetry.sdk.trace.data.SpanData; +import java.io.File; +import java.io.IOException; +import java.util.Collection; +import java.util.Iterator; +import java.util.concurrent.CompletableFuture; +import javax.annotation.Nonnull; + +public final class FileSpanStorage implements SignalStorage.Span { + private final FileSignalStorage fileSignalStorage; + + public static FileSpanStorage create(File destinationDir) { + return create(destinationDir, FileStorageConfiguration.getDefault()); + } + + public static FileSpanStorage create( + File destinationDir, FileStorageConfiguration configuration) { + Storage storage = + new Storage<>(FolderManager.create(destinationDir, configuration, Clock.getDefault())); + return new FileSpanStorage( + new 
FileSignalStorage<>(storage, SignalSerializer.ofSpans(), SignalDeserializer.ofSpans())); + } + + private FileSpanStorage(FileSignalStorage fileSignalStorage) { + this.fileSignalStorage = fileSignalStorage; + } + + @Override + public CompletableFuture write(Collection items) { + return fileSignalStorage.write(items); + } + + @Override + public CompletableFuture clear() { + return fileSignalStorage.clear(); + } + + @Override + public void close() throws IOException { + fileSignalStorage.close(); + } + + @Nonnull + @Override + public Iterator> iterator() { + return fileSignalStorage.iterator(); + } +} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/config/StorageConfiguration.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/storage/impl/FileStorageConfiguration.java similarity index 56% rename from disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/config/StorageConfiguration.java rename to disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/storage/impl/FileStorageConfiguration.java index 2470b263a..0a34c12b4 100644 --- a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/config/StorageConfiguration.java +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/storage/impl/FileStorageConfiguration.java @@ -3,22 +3,16 @@ * SPDX-License-Identifier: Apache-2.0 */ -package io.opentelemetry.contrib.disk.buffering.config; +package io.opentelemetry.contrib.disk.buffering.storage.impl; + +import static java.util.concurrent.TimeUnit.HOURS; +import static java.util.concurrent.TimeUnit.SECONDS; import com.google.auto.value.AutoValue; -import io.opentelemetry.contrib.disk.buffering.internal.files.DefaultTemporaryFileProvider; -import java.io.File; -import java.util.concurrent.TimeUnit; /** Defines how the storage should be managed. */ @AutoValue -public abstract class StorageConfiguration { - - /** The root storage location for buffered telemetry. 
*/ - public abstract File getRootDir(); - - /** Returns true if the storage has been configured with debug verbosity enabled. */ - public abstract boolean isDebugEnabled(); +public abstract class FileStorageConfiguration { /** The max amount of time a file can receive new data. */ public abstract long getMaxFileAgeForWriteMillis(); @@ -49,23 +43,17 @@ public abstract class StorageConfiguration { */ public abstract int getMaxFolderSize(); - /** A creator of temporary files needed to do the disk reading process. */ - public abstract TemporaryFileProvider getTemporaryFileProvider(); - - public static StorageConfiguration getDefault(File rootDir) { - return builder().setRootDir(rootDir).build(); + public static FileStorageConfiguration getDefault() { + return builder().build(); } public static Builder builder() { - TemporaryFileProvider fileProvider = DefaultTemporaryFileProvider.getInstance(); - return new AutoValue_StorageConfiguration.Builder() + return new AutoValue_FileStorageConfiguration.Builder() .setMaxFileSize(1024 * 1024) // 1MB .setMaxFolderSize(10 * 1024 * 1024) // 10MB - .setMaxFileAgeForWriteMillis(TimeUnit.SECONDS.toMillis(30)) - .setMinFileAgeForReadMillis(TimeUnit.SECONDS.toMillis(33)) - .setMaxFileAgeForReadMillis(TimeUnit.HOURS.toMillis(18)) - .setDebugEnabled(false) - .setTemporaryFileProvider(fileProvider); + .setMaxFileAgeForWriteMillis(SECONDS.toMillis(30)) + .setMinFileAgeForReadMillis(SECONDS.toMillis(33)) + .setMaxFileAgeForReadMillis(HOURS.toMillis(18)); } @AutoValue.Builder @@ -80,12 +68,16 @@ public abstract static class Builder { public abstract Builder setMaxFolderSize(int value); - public abstract Builder setTemporaryFileProvider(TemporaryFileProvider value); - - public abstract Builder setRootDir(File rootDir); - - public abstract Builder setDebugEnabled(boolean debugEnabled); - - public abstract StorageConfiguration build(); + abstract FileStorageConfiguration autoBuild(); + + public final FileStorageConfiguration build() { + 
FileStorageConfiguration configuration = autoBuild(); + if (configuration.getMinFileAgeForReadMillis() + <= configuration.getMaxFileAgeForWriteMillis()) { + throw new IllegalArgumentException( + "The configured max file age for writing must be lower than the configured min file age for reading"); + } + return configuration; + } } } diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/storage/result/DefaultWriteResult.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/storage/result/DefaultWriteResult.java new file mode 100644 index 000000000..ff693c74b --- /dev/null +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/storage/result/DefaultWriteResult.java @@ -0,0 +1,29 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.disk.buffering.storage.result; + +import javax.annotation.Nullable; + +final class DefaultWriteResult implements WriteResult { + private final boolean successful; + @Nullable private final Throwable error; + + DefaultWriteResult(boolean successful, @Nullable Throwable error) { + this.successful = successful; + this.error = error; + } + + @Override + public boolean isSuccessful() { + return successful; + } + + @Nullable + @Override + public Throwable getError() { + return error; + } +} diff --git a/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/storage/result/WriteResult.java b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/storage/result/WriteResult.java new file mode 100644 index 000000000..85c08ed37 --- /dev/null +++ b/disk-buffering/src/main/java/io/opentelemetry/contrib/disk/buffering/storage/result/WriteResult.java @@ -0,0 +1,35 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.disk.buffering.storage.result; + +import 
io.opentelemetry.contrib.disk.buffering.storage.SignalStorage; +import javax.annotation.Nullable; + +/** The result of a {@link SignalStorage} write operation. */ +public interface WriteResult { + /** + * Whether the operation succeeded or not. + * + * @return `true` if the items have been successfully stored, `false` otherwise. + */ + boolean isSuccessful(); + + /** + * Provides details of why the operation failed. + * + * @return The error (if any) for the failed operation. It must be null for successful operations. + */ + @Nullable + Throwable getError(); + + static WriteResult successful() { + return new DefaultWriteResult(/* successful= */ true, null); + } + + static WriteResult error(@Nullable Throwable t) { + return new DefaultWriteResult(/* successful= */ false, t); + } +} diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/FromDiskExporterImplTest.java b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/FromDiskExporterImplTest.java deleted file mode 100644 index b2955630e..000000000 --- a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/FromDiskExporterImplTest.java +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.disk.buffering; - -import static io.opentelemetry.contrib.disk.buffering.internal.storage.TestData.MIN_FILE_AGE_FOR_READ_MILLIS; -import static java.util.Collections.singletonList; -import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import io.opentelemetry.contrib.disk.buffering.internal.exporter.FromDiskExporterImpl; -import io.opentelemetry.contrib.disk.buffering.internal.serialization.deserializers.DeserializationException; 
-import io.opentelemetry.contrib.disk.buffering.internal.serialization.deserializers.SignalDeserializer; -import io.opentelemetry.contrib.disk.buffering.internal.storage.TestData; -import io.opentelemetry.sdk.common.Clock; -import io.opentelemetry.sdk.common.CompletableResultCode; -import io.opentelemetry.sdk.trace.data.SpanData; -import io.opentelemetry.sdk.trace.export.SpanExporter; -import java.io.File; -import java.io.IOException; -import java.nio.file.Files; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.TimeUnit; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; - -@SuppressWarnings("unchecked") -class FromDiskExporterImplTest { - private SpanExporter wrapped; - private SignalDeserializer deserializer; - private Clock clock; - private FromDiskExporterImpl exporter; - private final List deserializedData = Collections.emptyList(); - @TempDir File rootDir; - private static final String STORAGE_FOLDER_NAME = "testName"; - - @BeforeEach - void setUp() throws IOException { - clock = createClockMock(); - - setUpSerializer(); - wrapped = mock(); - exporter = - FromDiskExporterImpl.builder() - .setFolderName(STORAGE_FOLDER_NAME) - .setStorageConfiguration(TestData.getDefaultConfiguration(rootDir)) - .setDeserializer(deserializer) - .setExportFunction(wrapped::export) - .setStorageClock(clock) - .build(); - } - - @Test - void whenExportingStoredBatch_withAvailableData_andSuccessfullyProcessed_returnTrue() - throws IOException { - when(wrapped.export(deserializedData)).thenReturn(CompletableResultCode.ofSuccess()); - - createDummyFile(); - when(clock.now()).thenReturn(MILLISECONDS.toNanos(1000L + MIN_FILE_AGE_FOR_READ_MILLIS)); - - assertThat(exporter.exportStoredBatch(1, TimeUnit.SECONDS)).isTrue(); - } - - @Test - void whenExportingStoredBatch_withAvailableData_andUnsuccessfullyProcessed_returnFalse() - throws IOException { - 
when(wrapped.export(deserializedData)).thenReturn(CompletableResultCode.ofSuccess()); - - createDummyFile(); - when(clock.now()).thenReturn(1000L + MIN_FILE_AGE_FOR_READ_MILLIS); - - assertThat(exporter.exportStoredBatch(1, TimeUnit.SECONDS)).isFalse(); - } - - @Test - void whenExportingStoredBatch_withNoAvailableData_returnFalse() throws IOException { - assertThat(exporter.exportStoredBatch(1, TimeUnit.SECONDS)).isFalse(); - } - - @Test - void verifyStorageFolderIsCreated() { - assertThat(new File(rootDir, STORAGE_FOLDER_NAME).exists()).isTrue(); - } - - @Test - void whenDeserializationFails_returnFalse() throws IOException { - doThrow(DeserializationException.class).when(deserializer).deserialize(any()); - - assertThat(exporter.exportStoredBatch(1, TimeUnit.SECONDS)).isFalse(); - } - - private void createDummyFile() throws IOException { - File file = new File(rootDir, STORAGE_FOLDER_NAME + "/" + 1000L); - Files.write(file.toPath(), singletonList("First line")); - } - - private void setUpSerializer() throws DeserializationException { - deserializer = mock(); - when(deserializer.deserialize(any())).thenReturn(deserializedData); - } - - private static Clock createClockMock() { - Clock mock = mock(); - when(mock.now()).thenReturn(MILLISECONDS.toNanos(1000L)); - return mock; - } -} diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/IntegrationTest.java b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/IntegrationTest.java index c1c42b0bb..d30466430 100644 --- a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/IntegrationTest.java +++ b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/IntegrationTest.java @@ -5,26 +5,25 @@ package io.opentelemetry.contrib.disk.buffering; -import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static 
org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; +import static java.lang.Thread.sleep; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.anyCollection; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; import io.opentelemetry.api.logs.Logger; import io.opentelemetry.api.metrics.Meter; import io.opentelemetry.api.trace.Span; import io.opentelemetry.api.trace.Tracer; -import io.opentelemetry.contrib.disk.buffering.config.StorageConfiguration; -import io.opentelemetry.contrib.disk.buffering.internal.exporter.FromDiskExporterBuilder; -import io.opentelemetry.contrib.disk.buffering.internal.exporter.FromDiskExporterImpl; -import io.opentelemetry.contrib.disk.buffering.internal.exporter.ToDiskExporter; -import io.opentelemetry.contrib.disk.buffering.internal.serialization.deserializers.SignalDeserializer; -import io.opentelemetry.contrib.disk.buffering.internal.serialization.serializers.SignalSerializer; -import io.opentelemetry.contrib.disk.buffering.internal.utils.SignalTypes; -import io.opentelemetry.sdk.common.Clock; -import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.contrib.disk.buffering.exporters.LogRecordToDiskExporter; +import io.opentelemetry.contrib.disk.buffering.exporters.MetricToDiskExporter; +import io.opentelemetry.contrib.disk.buffering.exporters.SpanToDiskExporter; +import io.opentelemetry.contrib.disk.buffering.exporters.callback.ExporterCallback; +import io.opentelemetry.contrib.disk.buffering.storage.SignalStorage; +import io.opentelemetry.contrib.disk.buffering.storage.impl.FileLogRecordStorage; +import io.opentelemetry.contrib.disk.buffering.storage.impl.FileMetricStorage; +import io.opentelemetry.contrib.disk.buffering.storage.impl.FileSpanStorage; +import 
io.opentelemetry.contrib.disk.buffering.storage.impl.FileStorageConfiguration; import io.opentelemetry.sdk.logs.SdkLoggerProvider; import io.opentelemetry.sdk.logs.data.LogRecordData; import io.opentelemetry.sdk.logs.export.LogRecordExporter; @@ -33,159 +32,124 @@ import io.opentelemetry.sdk.metrics.data.MetricData; import io.opentelemetry.sdk.metrics.export.MetricExporter; import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader; -import io.opentelemetry.sdk.testing.exporter.InMemoryLogRecordExporter; -import io.opentelemetry.sdk.testing.exporter.InMemoryMetricExporter; -import io.opentelemetry.sdk.testing.exporter.InMemorySpanExporter; import io.opentelemetry.sdk.trace.SdkTracerProvider; import io.opentelemetry.sdk.trace.data.SpanData; import io.opentelemetry.sdk.trace.export.SimpleSpanProcessor; import io.opentelemetry.sdk.trace.export.SpanExporter; import java.io.File; import java.io.IOException; -import java.util.Collection; -import java.util.concurrent.TimeUnit; -import java.util.function.Function; -import java.util.function.Supplier; -import org.jetbrains.annotations.NotNull; +import java.util.ArrayList; +import java.util.List; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; import org.junit.jupiter.api.io.TempDir; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; -public class IntegrationTest { - private InMemorySpanExporter memorySpanExporter; - private SpanToDiskExporter spanToDiskExporter; +@ExtendWith(MockitoExtension.class) +class IntegrationTest { private Tracer tracer; - private InMemoryMetricExporter memoryMetricExporter; - private MetricToDiskExporter metricToDiskExporter; private SdkMeterProvider meterProvider; private Meter meter; - private InMemoryLogRecordExporter memoryLogRecordExporter; - private LogRecordToDiskExporter logToDiskExporter; private Logger logger; - private Clock clock; - 
@TempDir File rootDir; - private static final long INITIAL_TIME_IN_MILLIS = 1000; - private static final long NOW_NANOS = MILLISECONDS.toNanos(INITIAL_TIME_IN_MILLIS); - private StorageConfiguration storageConfig; + private SignalStorage.Span spanStorage; + private SignalStorage.LogRecord logStorage; + private SignalStorage.Metric metricStorage; + private SpanToDiskExporter spanToDiskExporter; + private MetricToDiskExporter metricToDiskExporter; + private LogRecordToDiskExporter logToDiskExporter; + @Mock private ExporterCallback spanCallback; + @Mock private ExporterCallback logCallback; + @Mock private ExporterCallback metricCallback; + @TempDir private File rootDir; + private static final long DELAY_BEFORE_READING_MILLIS = 500; @BeforeEach - void setUp() throws IOException { - storageConfig = StorageConfiguration.getDefault(rootDir); - clock = mock(); - - when(clock.now()).thenReturn(NOW_NANOS); + void setUp() { + FileStorageConfiguration storageConfig = + FileStorageConfiguration.builder() + .setMaxFileAgeForWriteMillis(DELAY_BEFORE_READING_MILLIS - 1) + .setMinFileAgeForReadMillis(DELAY_BEFORE_READING_MILLIS) + .build(); // Setting up spans - memorySpanExporter = InMemorySpanExporter.create(); - ToDiskExporter toDiskSpanExporter = - buildToDiskExporter(SignalSerializer.ofSpans(), memorySpanExporter::export); - spanToDiskExporter = new SpanToDiskExporter(toDiskSpanExporter); + spanStorage = FileSpanStorage.create(new File(rootDir, "spans"), storageConfig); + spanToDiskExporter = + SpanToDiskExporter.builder(spanStorage).setExporterCallback(spanCallback).build(); tracer = createTracerProvider(spanToDiskExporter).get("SpanInstrumentationScope"); // Setting up metrics - memoryMetricExporter = InMemoryMetricExporter.create(); - ToDiskExporter toDiskMetricExporter = - buildToDiskExporter(SignalSerializer.ofMetrics(), memoryMetricExporter::export); - metricToDiskExporter = new MetricToDiskExporter(toDiskMetricExporter, memoryMetricExporter); + metricStorage = 
FileMetricStorage.create(new File(rootDir, "metrics"), storageConfig); + metricToDiskExporter = + MetricToDiskExporter.builder(metricStorage).setExporterCallback(metricCallback).build(); meterProvider = createMeterProvider(metricToDiskExporter); meter = meterProvider.get("MetricInstrumentationScope"); // Setting up logs - memoryLogRecordExporter = InMemoryLogRecordExporter.create(); - ToDiskExporter toDiskLogExporter = - buildToDiskExporter(SignalSerializer.ofLogs(), memoryLogRecordExporter::export); - logToDiskExporter = new LogRecordToDiskExporter(toDiskLogExporter); + logStorage = FileLogRecordStorage.create(new File(rootDir, "logs"), storageConfig); + logToDiskExporter = + LogRecordToDiskExporter.builder(logStorage).setExporterCallback(logCallback).build(); logger = createLoggerProvider(logToDiskExporter).get("LogInstrumentationScope"); } - @NotNull - private ToDiskExporter buildToDiskExporter( - SignalSerializer serializer, Function, CompletableResultCode> exporter) - throws IOException { - return ToDiskExporter.builder() - .setFolderName(SignalTypes.spans.name()) - .setStorageConfiguration(storageConfig) - .setSerializer(serializer) - .setExportFunction(exporter) - .setStorageClock(clock) - .build(); - } - - @NotNull - private FromDiskExporterImpl buildFromDiskExporter( - FromDiskExporterBuilder builder, - Function, CompletableResultCode> exportFunction, - SignalDeserializer deserializer) - throws IOException { - return builder - .setExportFunction(exportFunction) - .setFolderName(SignalTypes.spans.name()) - .setStorageConfiguration(storageConfig) - .setDeserializer(deserializer) - .setStorageClock(clock) - .build(); + @AfterEach + void tearDown() throws IOException { + // Closing span exporter + spanToDiskExporter.shutdown(); + verify(spanCallback).onShutdown(); + verifyNoMoreInteractions(spanCallback); + + // Closing log exporter + logToDiskExporter.shutdown(); + verify(logCallback).onShutdown(); + verifyNoMoreInteractions(spanCallback); + + // Closing 
metric exporter + metricToDiskExporter.shutdown(); + verify(metricCallback).onShutdown(); + verifyNoMoreInteractions(spanCallback); + + // Closing storages + spanStorage.close(); + logStorage.close(); + metricStorage.close(); } @Test - void verifySpansIntegration() throws IOException { + void verifyIntegration() throws InterruptedException { + // Creating span Span span = tracer.spanBuilder("Span name").startSpan(); span.end(); - FromDiskExporterImpl fromDiskExporter = - buildFromDiskExporter( - FromDiskExporterImpl.builder(), - memorySpanExporter::export, - SignalDeserializer.ofSpans()); - assertExporter(fromDiskExporter, () -> memorySpanExporter.getFinishedSpanItems().size()); - } - - @Test - void verifyMetricsIntegration() throws IOException { - meter.counterBuilder("Counter").build().add(2); - meterProvider.forceFlush(); - - FromDiskExporterImpl fromDiskExporter = - buildFromDiskExporter( - FromDiskExporterImpl.builder(), - memoryMetricExporter::export, - SignalDeserializer.ofMetrics()); - assertExporter(fromDiskExporter, () -> memoryMetricExporter.getFinishedMetricItems().size()); - } - - @Test - void verifyLogRecordsIntegration() throws IOException { - logger.logRecordBuilder().setBody("I'm a log!").emit(); - - FromDiskExporterImpl fromDiskExporter = - buildFromDiskExporter( - FromDiskExporterImpl.builder(), - memoryLogRecordExporter::export, - SignalDeserializer.ofLogs()); - assertExporter( - fromDiskExporter, () -> memoryLogRecordExporter.getFinishedLogRecordItems().size()); - } + verify(spanCallback).onExportSuccess(anyCollection()); + verifyNoMoreInteractions(spanCallback); - private void assertExporter(FromDiskExporterImpl exporter, Supplier finishedItems) - throws IOException { - // Verify no data has been received in the original exporter until this point. 
- assertEquals(0, finishedItems.get()); + // Creating log + logger.logRecordBuilder().setBody("Log body").emit(); + verify(logCallback).onExportSuccess(anyCollection()); + verifyNoMoreInteractions(spanCallback); - // Go to the future when we can read the stored items. - fastForwardTimeByMillis(storageConfig.getMinFileAgeForReadMillis()); - - // Read and send stored data. - assertTrue(exporter.exportStoredBatch(1, TimeUnit.SECONDS)); - - // Now the data must have been delegated to the original exporter. - assertEquals(1, finishedItems.get()); - - // Bonus: Try to read again, no more data should be available. - assertFalse(exporter.exportStoredBatch(1, TimeUnit.SECONDS)); - assertEquals(1, finishedItems.get()); - } - - @SuppressWarnings("DirectInvocationOnMock") - private void fastForwardTimeByMillis(long milliseconds) { - when(clock.now()).thenReturn(NOW_NANOS + MILLISECONDS.toNanos(milliseconds)); + // Creating metric + meter.counterBuilder("counter").build().add(1); + meterProvider.forceFlush(); + verify(metricCallback).onExportSuccess(anyCollection()); + verifyNoMoreInteractions(spanCallback); + + // Waiting for read time + sleep(DELAY_BEFORE_READING_MILLIS); + + // Read + List storedSpans = new ArrayList<>(); + List storedLogs = new ArrayList<>(); + List storedMetrics = new ArrayList<>(); + spanStorage.forEach(storedSpans::addAll); + logStorage.forEach(storedLogs::addAll); + metricStorage.forEach(storedMetrics::addAll); + + assertThat(storedSpans).hasSize(1); + assertThat(storedLogs).hasSize(1); + assertThat(storedMetrics).hasSize(1); } private static SdkTracerProvider createTracerProvider(SpanExporter exporter) { diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/LogRecordToDiskExporterTest.java b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/LogRecordToDiskExporterTest.java deleted file mode 100644 index 6409cf067..000000000 --- 
a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/LogRecordToDiskExporterTest.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.disk.buffering; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; - -import io.opentelemetry.contrib.disk.buffering.internal.exporter.ToDiskExporter; -import io.opentelemetry.sdk.common.CompletableResultCode; -import io.opentelemetry.sdk.logs.data.LogRecordData; -import java.io.IOException; -import java.util.Arrays; -import java.util.List; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.Mock; -import org.mockito.junit.jupiter.MockitoExtension; - -@ExtendWith(MockitoExtension.class) -class LogRecordToDiskExporterTest { - - @Mock private ToDiskExporter delegate; - - @Test - void delegateShutdown_success() throws IOException { - LogRecordToDiskExporter testClass = new LogRecordToDiskExporter(delegate); - CompletableResultCode result = testClass.shutdown(); - assertThat(result.isSuccess()).isTrue(); - verify(delegate).shutdown(); - } - - @Test - void delegateShutdown_fail() throws IOException { - doThrow(new IOException("boom")).when(delegate).shutdown(); - LogRecordToDiskExporter testClass = new LogRecordToDiskExporter(delegate); - CompletableResultCode result = testClass.shutdown(); - assertThat(result.isSuccess()).isFalse(); - verify(delegate).shutdown(); - } - - @Test - void delegateExport() { - LogRecordData log1 = mock(); - LogRecordData log2 = mock(); - List logRecords = Arrays.asList(log1, log2); - - LogRecordToDiskExporter testClass = new LogRecordToDiskExporter(delegate); - testClass.export(logRecords); - - verify(delegate).export(logRecords); - } - - @Test - void flushReturnsSuccess() { - 
LogRecordToDiskExporter testClass = new LogRecordToDiskExporter(delegate); - CompletableResultCode result = testClass.flush(); - assertThat(result.isSuccess()).isTrue(); - } -} diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/MetricToDiskExporterTest.java b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/MetricToDiskExporterTest.java deleted file mode 100644 index 9ba84f67c..000000000 --- a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/MetricToDiskExporterTest.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.disk.buffering; - -import static io.opentelemetry.sdk.metrics.data.AggregationTemporality.CUMULATIVE; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; - -import io.opentelemetry.contrib.disk.buffering.internal.exporter.ToDiskExporter; -import io.opentelemetry.sdk.common.CompletableResultCode; -import io.opentelemetry.sdk.metrics.InstrumentType; -import io.opentelemetry.sdk.metrics.data.AggregationTemporality; -import io.opentelemetry.sdk.metrics.data.MetricData; -import java.io.IOException; -import java.util.Arrays; -import java.util.List; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.Mock; -import org.mockito.junit.jupiter.MockitoExtension; - -@ExtendWith(MockitoExtension.class) -class MetricToDiskExporterTest { - - @Mock private ToDiskExporter delegate; - - @Test - void delegateShutdown_success() throws IOException { - MetricToDiskExporter testClass = - new MetricToDiskExporter(delegate, MetricToDiskExporterTest::temporalityFn); - CompletableResultCode result = testClass.shutdown(); - assertThat(result.isSuccess()).isTrue(); - verify(delegate).shutdown(); - } - - private static 
AggregationTemporality temporalityFn(InstrumentType instrumentType) { - return CUMULATIVE; - } - - @Test - void delegateShutdown_fail() throws IOException { - doThrow(new IOException("boom")).when(delegate).shutdown(); - MetricToDiskExporter testClass = - new MetricToDiskExporter(delegate, MetricToDiskExporterTest::temporalityFn); - CompletableResultCode result = testClass.shutdown(); - assertThat(result.isSuccess()).isFalse(); - verify(delegate).shutdown(); - } - - @Test - void delegateExport() { - MetricData metric1 = mock(); - MetricData metric2 = mock(); - List metrics = Arrays.asList(metric1, metric2); - - MetricToDiskExporter testClass = - new MetricToDiskExporter(delegate, MetricToDiskExporterTest::temporalityFn); - - testClass.export(metrics); - - verify(delegate).export(metrics); - } - - @Test - void flushReturnsSuccess() { - MetricToDiskExporter testClass = - new MetricToDiskExporter(delegate, MetricToDiskExporterTest::temporalityFn); - - CompletableResultCode result = testClass.flush(); - assertThat(result.isSuccess()).isTrue(); - } -} diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/SpanFromDiskExporterTest.java b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/SpanFromDiskExporterTest.java deleted file mode 100644 index 05eef5a03..000000000 --- a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/SpanFromDiskExporterTest.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.disk.buffering; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import io.opentelemetry.api.common.AttributeKey; -import io.opentelemetry.api.common.Attributes; 
-import io.opentelemetry.api.trace.SpanContext; -import io.opentelemetry.api.trace.SpanKind; -import io.opentelemetry.api.trace.StatusCode; -import io.opentelemetry.api.trace.TraceFlags; -import io.opentelemetry.api.trace.TraceState; -import io.opentelemetry.contrib.disk.buffering.config.StorageConfiguration; -import io.opentelemetry.contrib.disk.buffering.internal.files.DefaultTemporaryFileProvider; -import io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.spans.models.SpanDataImpl; -import io.opentelemetry.contrib.disk.buffering.internal.serialization.serializers.SignalSerializer; -import io.opentelemetry.contrib.disk.buffering.internal.storage.Storage; -import io.opentelemetry.contrib.disk.buffering.internal.utils.SignalTypes; -import io.opentelemetry.contrib.disk.buffering.testutils.TestData; -import io.opentelemetry.sdk.common.CompletableResultCode; -import io.opentelemetry.sdk.resources.Resource; -import io.opentelemetry.sdk.trace.data.SpanData; -import io.opentelemetry.sdk.trace.data.StatusData; -import io.opentelemetry.sdk.trace.export.SpanExporter; -import java.io.File; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.TimeUnit; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; -import org.mockito.ArgumentCaptor; - -class SpanFromDiskExporterTest { - - @TempDir File tempDir; - - @SuppressWarnings("unchecked") - @Test - void fromDisk() throws Exception { - StorageConfiguration config = - StorageConfiguration.builder() - .setRootDir(tempDir) - .setMaxFileAgeForWriteMillis(TimeUnit.HOURS.toMillis(24)) - .setMinFileAgeForReadMillis(0) - .setMaxFileAgeForReadMillis(TimeUnit.HOURS.toMillis(24)) - .setTemporaryFileProvider(DefaultTemporaryFileProvider.getInstance()) - .build(); - - List spans = writeSomeSpans(config); - - SpanExporter exporter = mock(); - ArgumentCaptor> capture = ArgumentCaptor.forClass(Collection.class); - 
when(exporter.export(capture.capture())).thenReturn(CompletableResultCode.ofSuccess()); - - SpanFromDiskExporter testClass = SpanFromDiskExporter.create(exporter, config); - boolean result = testClass.exportStoredBatch(30, TimeUnit.SECONDS); - assertThat(result).isTrue(); - List exportedSpans = (List) capture.getValue(); - - long now = spans.get(0).getStartEpochNanos(); - SpanData expected1 = makeSpan1(TraceFlags.getSampled(), now); - SpanData expected2 = makeSpan2(TraceFlags.getSampled(), now); - - assertThat(exportedSpans.get(0)).isEqualTo(expected1); - assertThat(exportedSpans.get(1)).isEqualTo(expected2); - assertThat(exportedSpans).containsExactly(expected1, expected2); - - verify(exporter).export(eq(Arrays.asList(expected1, expected2))); - } - - private static List writeSomeSpans(StorageConfiguration config) throws Exception { - long now = System.currentTimeMillis() * 1_000_000; - SpanData span1 = makeSpan1(TraceFlags.getDefault(), now); - SpanData span2 = makeSpan2(TraceFlags.getSampled(), now); - List spans = Arrays.asList(span1, span2); - - SignalSerializer serializer = SignalSerializer.ofSpans(); - File subdir = new File(config.getRootDir(), SignalTypes.spans.name()); - assertTrue(subdir.mkdir()); - - Storage storage = - Storage.builder() - .setStorageConfiguration(config) - .setFolderName(SignalTypes.spans.name()) - .build(); - storage.write(serializer.serialize(spans)); - storage.close(); - return spans; - } - - private static SpanData makeSpan1(TraceFlags parentSpanContextFlags, long now) { - Attributes attributes = Attributes.of(AttributeKey.stringKey("foo"), "bar"); - SpanContext parentContext = TestData.makeContext(parentSpanContextFlags, TestData.SPAN_ID); - return SpanDataImpl.builder() - .setName("span1") - .setSpanContext( - SpanContext.create( - TestData.TRACE_ID, - TestData.SPAN_ID, - TraceFlags.getDefault(), - TraceState.getDefault())) - .setParentSpanContext(parentContext) - 
.setInstrumentationScopeInfo(TestData.INSTRUMENTATION_SCOPE_INFO_FULL) - .setStatus(StatusData.create(StatusCode.OK, "whatever")) - .setAttributes(attributes) - .setKind(SpanKind.SERVER) - .setStartEpochNanos(now) - .setEndEpochNanos(now + 50_000_000) - .setTotalRecordedEvents(0) - .setTotalRecordedLinks(0) - .setTotalAttributeCount(attributes.size()) - .setLinks(Collections.emptyList()) - .setEvents(Collections.emptyList()) - .setResource(Resource.getDefault()) - .build(); - } - - private static SpanData makeSpan2(TraceFlags parentSpanContextFlags, long now) { - Attributes attributes = Attributes.of(AttributeKey.stringKey("bar"), "baz"); - String spanId = "aaaaaaaaa12312312"; - SpanContext parentContext = TestData.makeContext(parentSpanContextFlags, spanId); - return SpanDataImpl.builder() - .setName("span2") - .setSpanContext( - SpanContext.create( - TestData.TRACE_ID, spanId, TraceFlags.getSampled(), TraceState.getDefault())) - .setParentSpanContext(parentContext) - .setInstrumentationScopeInfo(TestData.INSTRUMENTATION_SCOPE_INFO_FULL) - .setStatus(StatusData.create(StatusCode.OK, "excellent")) - .setAttributes(attributes) - .setKind(SpanKind.CLIENT) - .setStartEpochNanos(now + 12) - .setEndEpochNanos(now + 12 + 40_000_000) - .setTotalRecordedEvents(0) - .setTotalRecordedLinks(0) - .setTotalAttributeCount(attributes.size()) - .setLinks(Collections.emptyList()) - .setEvents(Collections.emptyList()) - .setResource(Resource.getDefault()) - .build(); - } -} diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/SpanToDiskExporterTest.java b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/SpanToDiskExporterTest.java deleted file mode 100644 index 96dcfcaa9..000000000 --- a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/SpanToDiskExporterTest.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package 
io.opentelemetry.contrib.disk.buffering; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; - -import io.opentelemetry.contrib.disk.buffering.internal.exporter.ToDiskExporter; -import io.opentelemetry.sdk.common.CompletableResultCode; -import io.opentelemetry.sdk.trace.data.SpanData; -import java.io.IOException; -import java.util.Arrays; -import java.util.List; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.Mock; -import org.mockito.junit.jupiter.MockitoExtension; - -@ExtendWith(MockitoExtension.class) -class SpanToDiskExporterTest { - @Mock private ToDiskExporter delegate; - - @Test - void delegateShutdown_success() throws IOException { - SpanToDiskExporter testClass = new SpanToDiskExporter(delegate); - CompletableResultCode result = testClass.shutdown(); - assertThat(result.isSuccess()).isTrue(); - verify(delegate).shutdown(); - } - - @Test - void delegateShutdown_fail() throws IOException { - doThrow(new IOException("boom")).when(delegate).shutdown(); - SpanToDiskExporter testClass = new SpanToDiskExporter(delegate); - CompletableResultCode result = testClass.shutdown(); - assertThat(result.isSuccess()).isFalse(); - verify(delegate).shutdown(); - } - - @Test - void delegateExport() { - SpanData span1 = mock(); - SpanData span2 = mock(); - List spans = Arrays.asList(span1, span2); - - SpanToDiskExporter testClass = new SpanToDiskExporter(delegate); - testClass.export(spans); - - verify(delegate).export(spans); - } - - @Test - void flushReturnsSuccess() { - SpanToDiskExporter testClass = new SpanToDiskExporter(delegate); - CompletableResultCode result = testClass.flush(); - assertThat(result.isSuccess()).isTrue(); - } -} diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/exporter/ToDiskExporterBuilderTest.java 
b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/exporter/ToDiskExporterBuilderTest.java deleted file mode 100644 index 288388e03..000000000 --- a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/exporter/ToDiskExporterBuilderTest.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.disk.buffering.internal.exporter; - -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import io.opentelemetry.contrib.disk.buffering.config.StorageConfiguration; -import io.opentelemetry.sdk.trace.data.SpanData; -import java.io.File; -import org.junit.jupiter.api.Test; - -class ToDiskExporterBuilderTest { - - @Test - void whenMinFileReadIsNotGraterThanMaxFileWrite_throwException() { - StorageConfiguration invalidConfig = - StorageConfiguration.builder() - .setMaxFileAgeForWriteMillis(2) - .setMinFileAgeForReadMillis(1) - .setRootDir(new File(".")) - .build(); - - assertThatThrownBy( - () -> ToDiskExporter.builder().setStorageConfiguration(invalidConfig)) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "The configured max file age for writing must be lower than the configured min file age for reading"); - } -} diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/exporter/ToDiskExporterTest.java b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/exporter/ToDiskExporterTest.java deleted file mode 100644 index 865aa6298..000000000 --- a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/exporter/ToDiskExporterTest.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.disk.buffering.internal.exporter; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static 
org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import io.opentelemetry.contrib.disk.buffering.internal.serialization.serializers.SignalSerializer; -import io.opentelemetry.contrib.disk.buffering.internal.storage.Storage; -import io.opentelemetry.sdk.common.CompletableResultCode; -import java.io.IOException; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Function; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.Mock; -import org.mockito.junit.jupiter.MockitoExtension; - -@ExtendWith(MockitoExtension.class) -class ToDiskExporterTest { - - private final List records = Arrays.asList("one", "two", "three"); - - private final byte[] serialized = "one,two,three".getBytes(UTF_8); - - @Mock private SignalSerializer serializer; - - @Mock private Storage storage; - private ToDiskExporter toDiskExporter; - private Function, CompletableResultCode> exportFn; - private Collection exportedFnSeen; - private AtomicReference exportFnResultToReturn; - - @BeforeEach - void setup() { - exportedFnSeen = null; - exportFnResultToReturn = new AtomicReference<>(null); - exportFn = - (Collection x) -> { - exportedFnSeen = x; - return exportFnResultToReturn.get(); - }; - toDiskExporter = new ToDiskExporter<>(serializer, exportFn, storage, true); - when(serializer.serialize(records)).thenReturn(serialized); - } - - @Test - void whenWritingSucceedsOnExport_returnSuccessfulResultCode() throws Exception { - when(storage.write(serialized)).thenReturn(true); - CompletableResultCode completableResultCode = toDiskExporter.export(records); - assertThat(completableResultCode.isSuccess()).isTrue(); - verify(storage).write(serialized); - assertThat(exportedFnSeen).isNull(); - } - - @Test - void 
whenWritingFailsOnExport_doExportRightAway() throws Exception { - when(storage.write(serialized)).thenReturn(false); - exportFnResultToReturn.set(CompletableResultCode.ofSuccess()); - - CompletableResultCode completableResultCode = toDiskExporter.export(records); - - assertThat(completableResultCode.isSuccess()).isTrue(); - assertThat(exportedFnSeen).isEqualTo(records); - } - - @Test - void whenExceptionInWrite_doExportRightAway() throws Exception { - when(storage.write(serialized)).thenThrow(new IOException("boom")); - exportFnResultToReturn.set(CompletableResultCode.ofFailure()); - - CompletableResultCode completableResultCode = toDiskExporter.export(records); - - assertThat(completableResultCode.isSuccess()).isFalse(); - assertThat(exportedFnSeen).isEqualTo(records); - } - - @Test - void shutdownClosesStorage() throws Exception { - toDiskExporter.export(records); - toDiskExporter.shutdown(); - verify(storage).close(); - } -} diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/exporters/SignalStorageExporterTest.java b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/exporters/SignalStorageExporterTest.java new file mode 100644 index 000000000..30bfe310a --- /dev/null +++ b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/exporters/SignalStorageExporterTest.java @@ -0,0 +1,135 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.disk.buffering.internal.exporters; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.anyCollection; +import static org.mockito.Mockito.clearInvocations; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +import 
io.opentelemetry.contrib.disk.buffering.exporters.callback.ExporterCallback; +import io.opentelemetry.contrib.disk.buffering.storage.SignalStorage; +import io.opentelemetry.contrib.disk.buffering.storage.result.WriteResult; +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.trace.data.SpanData; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import javax.annotation.Nonnull; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@SuppressWarnings("unchecked") +@ExtendWith(MockitoExtension.class) +class SignalStorageExporterTest { + @Mock private ExporterCallback callback; + + @Test + void verifyExportToStorage_success() { + SignalStorage.Span storage = new TestSpanStorage(); + SignalStorageExporter storageExporter = + new SignalStorageExporter<>(storage, callback, Duration.ofSeconds(1)); + SpanData item1 = mock(); + SpanData item2 = mock(); + SpanData item3 = mock(); + + List items = Arrays.asList(item1, item2); + CompletableResultCode resultCode = storageExporter.exportToStorage(items); + + assertThat(resultCode.isSuccess()).isTrue(); + verify(callback).onExportSuccess(items); + verifyNoMoreInteractions(callback); + + // Adding more items + clearInvocations(callback); + List items2 = Collections.singletonList(item3); + resultCode = storageExporter.exportToStorage(items2); + + assertThat(resultCode.isSuccess()).isTrue(); + verify(callback).onExportSuccess(items2); + verifyNoMoreInteractions(callback); + + // Checking items + List storedItems = new ArrayList<>(); + for (Collection collection : storage) { + storedItems.addAll(collection); + } + assertThat(storedItems).containsExactly(item1, item2, item3); + } + + @Test + void 
verifyExportToStorage_failure() { + SignalStorage.Span storage = mock(); + SignalStorageExporter storageExporter = + new SignalStorageExporter<>(storage, callback, Duration.ofSeconds(1)); + SpanData item1 = mock(); + + // Without exception + when(storage.write(anyCollection())) + .thenReturn(CompletableFuture.completedFuture(WriteResult.error(null))); + + List items = Collections.singletonList(item1); + CompletableResultCode resultCode = storageExporter.exportToStorage(items); + + assertThat(resultCode.isSuccess()).isFalse(); + assertThat(resultCode.getFailureThrowable()).isNull(); + verify(callback).onExportError(items, null); + verifyNoMoreInteractions(callback); + + // With exception + clearInvocations(callback); + Exception exception = new Exception(); + when(storage.write(anyCollection())) + .thenReturn(CompletableFuture.completedFuture(WriteResult.error(exception))); + + resultCode = storageExporter.exportToStorage(items); + + assertThat(resultCode.isSuccess()).isFalse(); + assertThat(resultCode.getFailureThrowable()).isEqualTo(exception); + verify(callback).onExportError(items, exception); + verifyNoMoreInteractions(callback); + } + + private static class TestSpanStorage implements SignalStorage.Span { + private final List> storedItems = new ArrayList<>(); + + @Override + public CompletableFuture write(Collection items) { + storedItems.add(items); + return getSuccessfulFuture(); + } + + @Override + public CompletableFuture clear() { + storedItems.clear(); + return getSuccessfulFuture(); + } + + @Override + public void close() {} + + @Nonnull + @Override + public Iterator> iterator() { + return storedItems.iterator(); + } + + @Nonnull + private static CompletableFuture getSuccessfulFuture() { + return CompletableFuture.completedFuture(WriteResult.successful()); + } + } +} diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/deserializers/LogRecordDataDeserializerTest.java 
b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/deserializers/LogRecordDataDeserializerTest.java new file mode 100644 index 000000000..a428956a7 --- /dev/null +++ b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/deserializers/LogRecordDataDeserializerTest.java @@ -0,0 +1,61 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.disk.buffering.internal.serialization.deserializers; + +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import io.opentelemetry.api.common.Value; +import io.opentelemetry.api.logs.Severity; +import io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.logs.models.LogRecordDataImpl; +import io.opentelemetry.contrib.disk.buffering.internal.serialization.serializers.SignalSerializer; +import io.opentelemetry.contrib.disk.buffering.testutils.BaseSignalSerializerTest; +import io.opentelemetry.contrib.disk.buffering.testutils.TestData; +import io.opentelemetry.sdk.logs.data.LogRecordData; +import org.junit.jupiter.api.Test; + +class LogRecordDataDeserializerTest extends BaseSignalSerializerTest { + private static final LogRecordData LOG_RECORD = + LogRecordDataImpl.builder() + .setResource(TestData.RESOURCE_FULL) + .setSpanContext(TestData.SPAN_CONTEXT) + .setInstrumentationScopeInfo(TestData.INSTRUMENTATION_SCOPE_INFO_FULL) + .setAttributes(TestData.ATTRIBUTES) + .setBodyValue(Value.of("Log body")) + .setSeverity(Severity.DEBUG) + .setSeverityText("Log severity text") + .setTimestampEpochNanos(100L) + .setObservedTimestampEpochNanos(200L) + .setTotalAttributeCount(3) + .setEventName("event") + .build(); + + @Test + void verifyDeserialization() { + assertSerializeDeserialize(LOG_RECORD, LOG_RECORD); + } + + @Test + void whenDecodingMalformedMessage_wrapIntoDeserializationException() { + assertThatThrownBy(() -> 
getDeserializer().deserialize(TestData.makeMalformedSignalBinary())) + .isInstanceOf(DeserializationException.class); + } + + @Test + void whenDecodingTooShortMessage_wrapIntoDeserializationException() { + assertThatThrownBy(() -> getDeserializer().deserialize(TestData.makeTooShortSignalBinary())) + .isInstanceOf(DeserializationException.class); + } + + @Override + protected SignalSerializer getSerializer() { + return SignalSerializer.ofLogs(); + } + + @Override + protected SignalDeserializer getDeserializer() { + return SignalDeserializer.ofLogs(); + } +} diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/deserializers/MetricDataDeserializerTest.java b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/deserializers/MetricDataDeserializerTest.java new file mode 100644 index 000000000..7364d865f --- /dev/null +++ b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/deserializers/MetricDataDeserializerTest.java @@ -0,0 +1,39 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.disk.buffering.internal.serialization.deserializers; + +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import io.opentelemetry.contrib.disk.buffering.internal.serialization.serializers.SignalSerializer; +import io.opentelemetry.contrib.disk.buffering.testutils.BaseSignalSerializerTest; +import io.opentelemetry.contrib.disk.buffering.testutils.TestData; +import io.opentelemetry.sdk.metrics.data.MetricData; +import org.junit.jupiter.api.Test; + +class MetricDataDeserializerTest extends BaseSignalSerializerTest { + + @Test + void whenDecodingMalformedMessage_wrapIntoDeserializationException() { + assertThatThrownBy(() -> getDeserializer().deserialize(TestData.makeMalformedSignalBinary())) + .isInstanceOf(DeserializationException.class); + } + + @Test + void 
whenDecodingTooShortMessage_wrapIntoDeserializationException() { + assertThatThrownBy(() -> getDeserializer().deserialize(TestData.makeTooShortSignalBinary())) + .isInstanceOf(DeserializationException.class); + } + + @Override + protected SignalSerializer getSerializer() { + return SignalSerializer.ofMetrics(); + } + + @Override + protected SignalDeserializer getDeserializer() { + return SignalDeserializer.ofMetrics(); + } +} diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/deserializers/SpanDataDeserializerTest.java b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/deserializers/SpanDataDeserializerTest.java new file mode 100644 index 000000000..1b9c61874 --- /dev/null +++ b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/deserializers/SpanDataDeserializerTest.java @@ -0,0 +1,66 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.disk.buffering.internal.serialization.deserializers; + +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import io.opentelemetry.api.trace.SpanKind; +import io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.spans.models.SpanDataImpl; +import io.opentelemetry.contrib.disk.buffering.internal.serialization.serializers.SignalSerializer; +import io.opentelemetry.contrib.disk.buffering.testutils.BaseSignalSerializerTest; +import io.opentelemetry.contrib.disk.buffering.testutils.TestData; +import io.opentelemetry.sdk.trace.data.SpanData; +import io.opentelemetry.sdk.trace.data.StatusData; +import java.util.Collections; +import org.junit.jupiter.api.Test; + +class SpanDataDeserializerTest extends BaseSignalSerializerTest { + private static final SpanData SPAN_DATA = + SpanDataImpl.builder() + .setSpanContext(TestData.SPAN_CONTEXT) + .setParentSpanContext(TestData.PARENT_SPAN_CONTEXT) + 
.setName("Test span") + .setKind(SpanKind.SERVER) + .setStartEpochNanos(100L) + .setEndEpochNanos(200L) + .setStatus(StatusData.ok()) + .setAttributes(TestData.ATTRIBUTES) + .setResource(TestData.RESOURCE_FULL) + .setInstrumentationScopeInfo(TestData.INSTRUMENTATION_SCOPE_INFO_FULL) + .setTotalRecordedLinks(0) + .setTotalRecordedEvents(0) + .setTotalAttributeCount(0) + .setEvents(Collections.emptyList()) + .setLinks(Collections.emptyList()) + .build(); + + @Test + void verifyDeserialization() { + assertSerializeDeserialize(SPAN_DATA, SPAN_DATA); + } + + @Test + void whenDecodingMalformedMessage_wrapIntoDeserializationException() { + assertThatThrownBy(() -> getDeserializer().deserialize(TestData.makeMalformedSignalBinary())) + .isInstanceOf(DeserializationException.class); + } + + @Test + void whenDecodingTooShortMessage_wrapIntoDeserializationException() { + assertThatThrownBy(() -> getDeserializer().deserialize(TestData.makeTooShortSignalBinary())) + .isInstanceOf(DeserializationException.class); + } + + @Override + protected SignalSerializer getSerializer() { + return SignalSerializer.ofSpans(); + } + + @Override + protected SignalDeserializer getDeserializer() { + return SignalDeserializer.ofSpans(); + } +} diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/common/AttributesMapperTest.java b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/common/AttributesMapperTest.java index 2857cf8da..a5db9382f 100644 --- a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/common/AttributesMapperTest.java +++ b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/common/AttributesMapperTest.java @@ -5,7 +5,7 @@ package io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.common; -import static org.junit.jupiter.api.Assertions.assertEquals; +import 
static org.assertj.core.api.Assertions.assertThat; import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.common.Attributes; @@ -21,6 +21,7 @@ void verifyMapping() { Attributes attributes = Attributes.builder() .put(AttributeKey.stringKey("someString"), "someValue") + .put(AttributeKey.stringKey("emptyString"), "") .put(AttributeKey.booleanKey("someBool"), true) .put(AttributeKey.longKey("someLong"), 10L) .put(AttributeKey.doubleKey("someDouble"), 10.0) @@ -28,7 +29,7 @@ void verifyMapping() { List proto = mapToProto(attributes); - assertEquals(attributes, mapFromProto(proto)); + assertThat(mapFromProto(proto)).isEqualTo(attributes); } @Test @@ -45,7 +46,7 @@ void verifyArrayMapping() { List serialized = mapToProto(attributes); - assertEquals(attributes, mapFromProto(serialized)); + assertThat(mapFromProto(serialized)).isEqualTo(attributes); } private static List mapToProto(Attributes attributes) { diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/common/ResourceMapperTest.java b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/common/ResourceMapperTest.java index e9feb8be4..9776cd068 100644 --- a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/common/ResourceMapperTest.java +++ b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/common/ResourceMapperTest.java @@ -5,7 +5,7 @@ package io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.common; -import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; import io.opentelemetry.contrib.disk.buffering.testutils.TestData; import io.opentelemetry.proto.resource.v1.Resource; @@ -17,7 +17,8 @@ class ResourceMapperTest { void verifyMapping() { Resource proto = mapToProto(TestData.RESOURCE_FULL); - 
assertEquals(TestData.RESOURCE_FULL, mapToSdk(proto, TestData.RESOURCE_FULL.getSchemaUrl())); + assertThat(mapToSdk(proto, TestData.RESOURCE_FULL.getSchemaUrl())) + .isEqualTo(TestData.RESOURCE_FULL); } private static Resource mapToProto(io.opentelemetry.sdk.resources.Resource sdkResource) { diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/logs/LogRecordDataMapperTest.java b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/logs/LogRecordDataMapperTest.java index 3eb588b45..56710355f 100644 --- a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/logs/LogRecordDataMapperTest.java +++ b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/logs/LogRecordDataMapperTest.java @@ -5,7 +5,7 @@ package io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.logs; -import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; import io.opentelemetry.api.common.Value; import io.opentelemetry.api.logs.Severity; @@ -31,15 +31,15 @@ class LogRecordDataMapperTest { .setTimestampEpochNanos(100L) .setObservedTimestampEpochNanos(200L) .setTotalAttributeCount(3) + .setEventName("my.event.name") .build(); @Test void verifyMapping() { LogRecord proto = mapToProto(LOG_RECORD); - assertEquals( - LOG_RECORD, - mapToSdk(proto, LOG_RECORD.getResource(), LOG_RECORD.getInstrumentationScopeInfo())); + assertThat(mapToSdk(proto, LOG_RECORD.getResource(), LOG_RECORD.getInstrumentationScopeInfo())) + .isEqualTo(LOG_RECORD); } private static LogRecord mapToProto(LogRecordData data) { diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/logs/ProtoLogsDataMapperTest.java 
b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/logs/ProtoLogsDataMapperTest.java index 45c3f6e5e..7242bd65e 100644 --- a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/logs/ProtoLogsDataMapperTest.java +++ b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/logs/ProtoLogsDataMapperTest.java @@ -6,14 +6,13 @@ package io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.logs; import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertEquals; import io.opentelemetry.api.common.Value; import io.opentelemetry.api.logs.Severity; import io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.logs.models.LogRecordDataImpl; import io.opentelemetry.contrib.disk.buffering.testutils.TestData; +import io.opentelemetry.proto.collector.logs.v1.ExportLogsServiceRequest; import io.opentelemetry.proto.logs.v1.LogRecord; -import io.opentelemetry.proto.logs.v1.LogsData; import io.opentelemetry.proto.logs.v1.ResourceLogs; import io.opentelemetry.proto.logs.v1.ScopeLogs; import io.opentelemetry.sdk.logs.data.LogRecordData; @@ -37,6 +36,7 @@ class ProtoLogsDataMapperTest { .setTimestampEpochNanos(100L) .setObservedTimestampEpochNanos(200L) .setTotalAttributeCount(3) + .setEventName("") .build(); private static final LogRecordData OTHER_LOG_RECORD = @@ -51,6 +51,7 @@ class ProtoLogsDataMapperTest { .setTimestampEpochNanos(100L) .setObservedTimestampEpochNanos(200L) .setTotalAttributeCount(3) + .setEventName("") .build(); private static final LogRecordData LOG_RECORD_WITH_DIFFERENT_SCOPE_SAME_RESOURCE = @@ -65,6 +66,7 @@ class ProtoLogsDataMapperTest { .setTimestampEpochNanos(100L) .setObservedTimestampEpochNanos(200L) .setTotalAttributeCount(3) + .setEventName("") .build(); private static final LogRecordData LOG_RECORD_WITH_DIFFERENT_RESOURCE = @@ 
-79,18 +81,34 @@ class ProtoLogsDataMapperTest { .setTimestampEpochNanos(100L) .setObservedTimestampEpochNanos(200L) .setTotalAttributeCount(3) + .setEventName("") + .build(); + + private static final LogRecordData LOG_RECORD_WITH_EVENT_NAME = + LogRecordDataImpl.builder() + .setResource(TestData.RESOURCE_FULL) + .setSpanContext(TestData.SPAN_CONTEXT) + .setInstrumentationScopeInfo(TestData.INSTRUMENTATION_SCOPE_INFO_FULL) + .setAttributes(TestData.ATTRIBUTES) + .setBodyValue(Value.of("Log body")) + .setSeverity(Severity.DEBUG) + .setSeverityText("Log severity text") + .setTimestampEpochNanos(100L) + .setObservedTimestampEpochNanos(200L) + .setTotalAttributeCount(3) + .setEventName("test.event.name") .build(); @Test void verifyConversionDataStructure() { List signals = Collections.singletonList(LOG_RECORD); - LogsData result = mapToProto(signals); + ExportLogsServiceRequest result = mapToProto(signals); List resourceLogsList = result.resource_logs; - assertEquals(1, resourceLogsList.size()); - assertEquals(1, resourceLogsList.get(0).scope_logs.size()); - assertEquals(1, resourceLogsList.get(0).scope_logs.get(0).log_records.size()); + assertThat(resourceLogsList).hasSize(1); + assertThat(resourceLogsList.get(0).scope_logs).hasSize(1); + assertThat(resourceLogsList.get(0).scope_logs.get(0).log_records).hasSize(1); assertThat(mapFromProto(result)).containsExactlyInAnyOrderElementsOf(signals); } @@ -99,18 +117,18 @@ void verifyConversionDataStructure() { void verifyMultipleLogsWithSameResourceAndScope() { List signals = Arrays.asList(LOG_RECORD, OTHER_LOG_RECORD); - LogsData proto = mapToProto(signals); + ExportLogsServiceRequest proto = mapToProto(signals); List resourceLogsList = proto.resource_logs; - assertEquals(1, resourceLogsList.size()); + assertThat(resourceLogsList).hasSize(1); List scopeLogsList = resourceLogsList.get(0).scope_logs; - assertEquals(1, scopeLogsList.size()); + assertThat(scopeLogsList).hasSize(1); List logRecords = 
scopeLogsList.get(0).log_records; - assertEquals(2, logRecords.size()); - assertEquals("Log body", logRecords.get(0).body.string_value); - assertEquals("Other log body", logRecords.get(1).body.string_value); + assertThat(logRecords).hasSize(2); + assertThat(logRecords.get(0).body.string_value).isEqualTo("Log body"); + assertThat(logRecords.get(1).body.string_value).isEqualTo("Other log body"); - assertEquals(2, mapFromProto(proto).size()); + assertThat(mapFromProto(proto)).hasSize(2); assertThat(mapFromProto(proto)).containsExactlyInAnyOrderElementsOf(signals); } @@ -120,18 +138,18 @@ void verifyMultipleLogsWithSameResourceDifferentScope() { List signals = Arrays.asList(LOG_RECORD, LOG_RECORD_WITH_DIFFERENT_SCOPE_SAME_RESOURCE); - LogsData proto = mapToProto(signals); + ExportLogsServiceRequest proto = mapToProto(signals); List resourceLogsList = proto.resource_logs; - assertEquals(1, resourceLogsList.size()); + assertThat(resourceLogsList).hasSize(1); List scopeLogsList = resourceLogsList.get(0).scope_logs; - assertEquals(2, scopeLogsList.size()); + assertThat(scopeLogsList).hasSize(2); ScopeLogs firstScope = scopeLogsList.get(0); ScopeLogs secondScope = scopeLogsList.get(1); List firstScopeLogs = firstScope.log_records; List secondScopeLogs = secondScope.log_records; - assertEquals(1, firstScopeLogs.size()); - assertEquals(1, secondScopeLogs.size()); + assertThat(firstScopeLogs).hasSize(1); + assertThat(secondScopeLogs).hasSize(1); assertThat(mapFromProto(proto)).containsExactlyInAnyOrderElementsOf(signals); } @@ -140,31 +158,44 @@ void verifyMultipleLogsWithSameResourceDifferentScope() { void verifyMultipleLogsWithDifferentResource() { List signals = Arrays.asList(LOG_RECORD, LOG_RECORD_WITH_DIFFERENT_RESOURCE); - LogsData proto = mapToProto(signals); + ExportLogsServiceRequest proto = mapToProto(signals); List resourceLogsList = proto.resource_logs; - assertEquals(2, resourceLogsList.size()); + assertThat(resourceLogsList).hasSize(2); ResourceLogs 
firstResourceLogs = resourceLogsList.get(0); ResourceLogs secondResourceLogs = resourceLogsList.get(1); List firstScopeLogsList = firstResourceLogs.scope_logs; List secondScopeLogsList = secondResourceLogs.scope_logs; - assertEquals(1, firstScopeLogsList.size()); - assertEquals(1, secondScopeLogsList.size()); + assertThat(firstScopeLogsList).hasSize(1); + assertThat(secondScopeLogsList).hasSize(1); ScopeLogs firstScope = firstScopeLogsList.get(0); ScopeLogs secondScope = secondScopeLogsList.get(0); List firstScopeLogs = firstScope.log_records; List secondScopeLogs = secondScope.log_records; - assertEquals(1, firstScopeLogs.size()); - assertEquals(1, secondScopeLogs.size()); + assertThat(firstScopeLogs).hasSize(1); + assertThat(secondScopeLogs).hasSize(1); assertThat(mapFromProto(proto)).containsExactlyInAnyOrderElementsOf(signals); } - private static LogsData mapToProto(Collection signals) { + @Test + void verifyLogWithEventName() { + List signals = Collections.singletonList(LOG_RECORD_WITH_EVENT_NAME); + + ExportLogsServiceRequest result = mapToProto(signals); + + List resourceLogsList = result.resource_logs; + LogRecord firstLog = resourceLogsList.get(0).scope_logs.get(0).log_records.get(0); + + assertThat(firstLog.event_name).isEqualTo("test.event.name"); + assertThat(mapFromProto(result)).containsExactlyInAnyOrderElementsOf(signals); + } + + private static ExportLogsServiceRequest mapToProto(Collection signals) { return ProtoLogsDataMapper.getInstance().toProto(signals); } - private static List mapFromProto(LogsData protoData) { + private static List mapFromProto(ExportLogsServiceRequest protoData) { return ProtoLogsDataMapper.getInstance().fromProto(protoData); } } diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/metrics/MetricDataMapperTest.java b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/metrics/MetricDataMapperTest.java index 
b4f7f64d9..b5e804e29 100644 --- a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/metrics/MetricDataMapperTest.java +++ b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/metrics/MetricDataMapperTest.java @@ -9,7 +9,6 @@ import static io.opentelemetry.contrib.disk.buffering.testutils.TestData.makeLongGauge; import static io.opentelemetry.contrib.disk.buffering.testutils.TestData.makeLongPointData; import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertEquals; import io.opentelemetry.api.trace.SpanContext; import io.opentelemetry.api.trace.TraceFlags; @@ -121,9 +120,10 @@ void verifySummaryMapping() { Metric proto = mapToProto(summaryMetric); - assertEquals( - summaryMetric, - mapToSdk(proto, summaryMetric.getResource(), summaryMetric.getInstrumentationScopeInfo())); + assertThat( + mapToSdk( + proto, summaryMetric.getResource(), summaryMetric.getInstrumentationScopeInfo())) + .isEqualTo(summaryMetric); } @Test diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/metrics/ProtoMetricsDataMapperTest.java b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/metrics/ProtoMetricsDataMapperTest.java index b45e9c9e7..f114fe04d 100644 --- a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/metrics/ProtoMetricsDataMapperTest.java +++ b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/metrics/ProtoMetricsDataMapperTest.java @@ -6,12 +6,11 @@ package io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.metrics; import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertEquals; import io.opentelemetry.api.trace.TraceFlags; import 
io.opentelemetry.contrib.disk.buffering.testutils.TestData; +import io.opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest; import io.opentelemetry.proto.metrics.v1.Metric; -import io.opentelemetry.proto.metrics.v1.MetricsData; import io.opentelemetry.proto.metrics.v1.ResourceMetrics; import io.opentelemetry.proto.metrics.v1.ScopeMetrics; import io.opentelemetry.sdk.metrics.data.MetricData; @@ -30,12 +29,12 @@ void verifyConversionDataStructure() { MetricData expectedGauge1 = TestData.makeLongGauge(TraceFlags.getSampled()); List expectedSignals = Collections.singletonList(expectedGauge1); - MetricsData proto = mapToProto(signals); + ExportMetricsServiceRequest proto = mapToProto(signals); List resourceMetrics = proto.resource_metrics; - assertEquals(1, resourceMetrics.size()); - assertEquals(1, resourceMetrics.get(0).scope_metrics.size()); - assertEquals(1, resourceMetrics.get(0).scope_metrics.get(0).metrics.size()); + assertThat(resourceMetrics).hasSize(1); + assertThat(resourceMetrics.get(0).scope_metrics).hasSize(1); + assertThat(resourceMetrics.get(0).scope_metrics.get(0).metrics).hasSize(1); assertThat(mapFromProto(proto)).containsExactlyInAnyOrderElementsOf(expectedSignals); } @@ -49,14 +48,14 @@ void verifyMultipleMetricsWithSameResourceAndScope() { MetricData expectedGauge2 = TestData.makeLongGauge(TraceFlags.getSampled()); List expectedSignals = Arrays.asList(expectedGauge1, expectedGauge2); - MetricsData proto = mapToProto(signals); + ExportMetricsServiceRequest proto = mapToProto(signals); List resourceMetrics = proto.resource_metrics; - assertEquals(1, resourceMetrics.size()); + assertThat(resourceMetrics).hasSize(1); List scopeMetrics = resourceMetrics.get(0).scope_metrics; - assertEquals(1, scopeMetrics.size()); + assertThat(scopeMetrics).hasSize(1); List metrics = scopeMetrics.get(0).metrics; - assertEquals(2, metrics.size()); + assertThat(metrics).hasSize(2); List result = mapFromProto(proto); @@ -78,18 +77,18 @@ void 
verifyMultipleMetricsWithSameResourceDifferentScope() { List signals = Arrays.asList(gauge1, gauge2); List expectedSignals = Arrays.asList(expectedGauge1, expectedGauge2); - MetricsData proto = mapToProto(signals); + ExportMetricsServiceRequest proto = mapToProto(signals); List resourceMetrics = proto.resource_metrics; - assertEquals(1, resourceMetrics.size()); + assertThat(resourceMetrics).hasSize(1); List scopeMetrics = resourceMetrics.get(0).scope_metrics; - assertEquals(2, scopeMetrics.size()); + assertThat(scopeMetrics).hasSize(2); ScopeMetrics firstScope = scopeMetrics.get(0); ScopeMetrics secondScope = scopeMetrics.get(1); List firstScopeMetrics = firstScope.metrics; List secondScopeMetrics = secondScope.metrics; - assertEquals(1, firstScopeMetrics.size()); - assertEquals(1, secondScopeMetrics.size()); + assertThat(firstScopeMetrics).hasSize(1); + assertThat(secondScopeMetrics).hasSize(1); assertThat(mapFromProto(proto)).containsExactlyInAnyOrderElementsOf(expectedSignals); } @@ -113,31 +112,31 @@ void verifyMultipleMetricsWithDifferentResource() { // , LONG_GAUGE_METRIC_WITH_DIFFERENT_RESOURCE); // List expectedSignals = Arrays.asList(expected); - MetricsData proto = mapToProto(signals); + ExportMetricsServiceRequest proto = mapToProto(signals); List resourceMetrics = proto.resource_metrics; - assertEquals(2, resourceMetrics.size()); + assertThat(resourceMetrics).hasSize(2); ResourceMetrics firstResourceMetrics = resourceMetrics.get(0); ResourceMetrics secondResourceMetrics = resourceMetrics.get(1); List firstScopeMetrics = firstResourceMetrics.scope_metrics; List secondScopeMetrics = secondResourceMetrics.scope_metrics; - assertEquals(1, firstScopeMetrics.size()); - assertEquals(1, secondScopeMetrics.size()); + assertThat(firstScopeMetrics).hasSize(1); + assertThat(secondScopeMetrics).hasSize(1); ScopeMetrics firstScope = firstScopeMetrics.get(0); ScopeMetrics secondScope = secondScopeMetrics.get(0); List firstMetrics = firstScope.metrics; List 
secondMetrics = secondScope.metrics; - assertEquals(1, firstMetrics.size()); - assertEquals(1, secondMetrics.size()); + assertThat(firstMetrics).hasSize(1); + assertThat(secondMetrics).hasSize(1); assertThat(mapFromProto(proto)).containsExactlyInAnyOrderElementsOf(expectedSignals); } - private static MetricsData mapToProto(Collection signals) { + private static ExportMetricsServiceRequest mapToProto(Collection signals) { return ProtoMetricsDataMapper.getInstance().toProto(signals); } - private static List mapFromProto(MetricsData protoData) { + private static List mapFromProto(ExportMetricsServiceRequest protoData) { return ProtoMetricsDataMapper.getInstance().fromProto(protoData); } } diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/spans/ProtoSpansDataMapperTest.java b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/spans/ProtoSpansDataMapperTest.java index bdd9c053c..910455463 100644 --- a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/spans/ProtoSpansDataMapperTest.java +++ b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/spans/ProtoSpansDataMapperTest.java @@ -6,15 +6,14 @@ package io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.spans; import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertEquals; import io.opentelemetry.api.trace.SpanKind; import io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.spans.models.SpanDataImpl; import io.opentelemetry.contrib.disk.buffering.testutils.TestData; +import io.opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest; import io.opentelemetry.proto.trace.v1.ResourceSpans; import io.opentelemetry.proto.trace.v1.ScopeSpans; import io.opentelemetry.proto.trace.v1.Span; -import 
io.opentelemetry.proto.trace.v1.TracesData; import io.opentelemetry.sdk.trace.data.EventData; import io.opentelemetry.sdk.trace.data.LinkData; import io.opentelemetry.sdk.trace.data.SpanData; @@ -116,12 +115,12 @@ class ProtoSpansDataMapperTest { void verifyConversionDataStructure() { List signals = Collections.singletonList(SPAN_DATA); - TracesData proto = mapToProto(signals); + ExportTraceServiceRequest proto = mapToProto(signals); List resourceSpans = proto.resource_spans; - assertEquals(1, resourceSpans.size()); - assertEquals(1, resourceSpans.get(0).scope_spans.size()); - assertEquals(1, resourceSpans.get(0).scope_spans.get(0).spans.size()); + assertThat(resourceSpans).hasSize(1); + assertThat(resourceSpans.get(0).scope_spans).hasSize(1); + assertThat(resourceSpans.get(0).scope_spans.get(0).spans).hasSize(1); assertThat(mapFromProto(proto)).containsExactlyInAnyOrderElementsOf(signals); } @@ -130,14 +129,14 @@ void verifyConversionDataStructure() { void verifyMultipleSpansWithSameResourceAndScope() { List signals = Arrays.asList(SPAN_DATA, OTHER_SPAN_DATA); - TracesData proto = mapToProto(signals); + ExportTraceServiceRequest proto = mapToProto(signals); List resourceSpans = proto.resource_spans; - assertEquals(1, resourceSpans.size()); + assertThat(resourceSpans).hasSize(1); List scopeSpans = resourceSpans.get(0).scope_spans; - assertEquals(1, scopeSpans.size()); + assertThat(scopeSpans).hasSize(1); List spans = scopeSpans.get(0).spans; - assertEquals(2, spans.size()); + assertThat(spans).hasSize(2); assertThat(mapFromProto(proto)).containsExactlyInAnyOrderElementsOf(signals); } @@ -146,18 +145,18 @@ void verifyMultipleSpansWithSameResourceAndScope() { void verifyMultipleSpansWithSameResourceDifferentScope() { List signals = Arrays.asList(SPAN_DATA, SPAN_DATA_WITH_DIFFERENT_SCOPE_SAME_RESOURCE); - TracesData proto = mapToProto(signals); + ExportTraceServiceRequest proto = mapToProto(signals); List resourceSpans = proto.resource_spans; - assertEquals(1, 
resourceSpans.size()); + assertThat(resourceSpans).hasSize(1); List scopeSpans = resourceSpans.get(0).scope_spans; - assertEquals(2, scopeSpans.size()); + assertThat(scopeSpans).hasSize(2); ScopeSpans firstScope = scopeSpans.get(0); ScopeSpans secondScope = scopeSpans.get(1); List firstScopeSpans = firstScope.spans; List secondScopeSpans = secondScope.spans; - assertEquals(1, firstScopeSpans.size()); - assertEquals(1, secondScopeSpans.size()); + assertThat(firstScopeSpans).hasSize(1); + assertThat(secondScopeSpans).hasSize(1); assertThat(mapFromProto(proto)).containsExactlyInAnyOrderElementsOf(signals); } @@ -166,31 +165,31 @@ void verifyMultipleSpansWithSameResourceDifferentScope() { void verifyMultipleSpansWithDifferentResource() { List signals = Arrays.asList(SPAN_DATA, SPAN_DATA_WITH_DIFFERENT_RESOURCE); - TracesData proto = mapToProto(signals); + ExportTraceServiceRequest proto = mapToProto(signals); List resourceSpans = proto.resource_spans; - assertEquals(2, resourceSpans.size()); + assertThat(resourceSpans).hasSize(2); ResourceSpans firstResourceSpans = resourceSpans.get(0); ResourceSpans secondResourceSpans = resourceSpans.get(1); List firstScopeSpans = firstResourceSpans.scope_spans; List secondScopeSpans = secondResourceSpans.scope_spans; - assertEquals(1, firstScopeSpans.size()); - assertEquals(1, secondScopeSpans.size()); + assertThat(firstScopeSpans).hasSize(1); + assertThat(secondScopeSpans).hasSize(1); ScopeSpans firstScope = firstScopeSpans.get(0); ScopeSpans secondScope = secondScopeSpans.get(0); List firstSpans = firstScope.spans; List secondSpans = secondScope.spans; - assertEquals(1, firstSpans.size()); - assertEquals(1, secondSpans.size()); + assertThat(firstSpans).hasSize(1); + assertThat(secondSpans).hasSize(1); assertThat(mapFromProto(proto)).containsExactlyInAnyOrderElementsOf(signals); } - private static TracesData mapToProto(Collection signals) { + private static ExportTraceServiceRequest mapToProto(Collection signals) { return 
ProtoSpansDataMapper.getInstance().toProto(signals); } - private static List mapFromProto(TracesData protoData) { + private static List mapFromProto(ExportTraceServiceRequest protoData) { return ProtoSpansDataMapper.getInstance().fromProto(protoData); } } diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/spans/SpanDataMapperTest.java b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/spans/SpanDataMapperTest.java index de8f8ff78..43394431f 100644 --- a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/spans/SpanDataMapperTest.java +++ b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/mapping/spans/SpanDataMapperTest.java @@ -5,7 +5,7 @@ package io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.spans; -import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; import io.opentelemetry.api.trace.SpanKind; import io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.spans.models.SpanDataImpl; @@ -74,21 +74,20 @@ class SpanDataMapperTest { void verifyMapping() { Span proto = mapToProto(SPAN_DATA); - assertEquals( - SPAN_DATA, - mapToSdk(proto, SPAN_DATA.getResource(), SPAN_DATA.getInstrumentationScopeInfo())); + assertThat(mapToSdk(proto, SPAN_DATA.getResource(), SPAN_DATA.getInstrumentationScopeInfo())) + .isEqualTo(SPAN_DATA); } @Test void verifyMappingWithTraceState() { Span proto = mapToProto(SPAN_DATA_WITH_TRACE_STATE); - assertEquals( - SPAN_DATA_WITH_TRACE_STATE, - mapToSdk( - proto, - SPAN_DATA_WITH_TRACE_STATE.getResource(), - SPAN_DATA_WITH_TRACE_STATE.getInstrumentationScopeInfo())); + assertThat( + mapToSdk( + proto, + SPAN_DATA_WITH_TRACE_STATE.getResource(), + SPAN_DATA_WITH_TRACE_STATE.getInstrumentationScopeInfo())) + 
.isEqualTo(SPAN_DATA_WITH_TRACE_STATE); } private static Span mapToProto(SpanData source) { diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/serializers/ByteArraySerializer.java b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/serializers/ByteArraySerializer.java new file mode 100644 index 000000000..7ad446729 --- /dev/null +++ b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/serializers/ByteArraySerializer.java @@ -0,0 +1,39 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.disk.buffering.internal.serialization.serializers; + +import com.google.errorprone.annotations.CanIgnoreReturnValue; +import java.io.IOException; +import java.io.OutputStream; +import java.util.Collection; + +public final class ByteArraySerializer implements SignalSerializer { + + private final byte[] data; + + public ByteArraySerializer(byte[] data) { + this.data = data; + } + + @CanIgnoreReturnValue + @Override + public SignalSerializer initialize(Collection data) { + return null; + } + + @Override + public void writeBinaryTo(OutputStream output) throws IOException { + output.write(data); + } + + @Override + public int getBinarySerializedSize() { + return data.length; + } + + @Override + public void reset() {} +} diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/serializers/LogRecordDataSerializerTest.java b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/serializers/LogRecordDataSerializerTest.java index 1b52bb219..ec3212001 100644 --- a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/serializers/LogRecordDataSerializerTest.java +++ 
b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/serialization/serializers/LogRecordDataSerializerTest.java @@ -28,9 +28,10 @@ class LogRecordDataSerializerTest extends BaseSignalSerializerTest processing; - private ReadableFile readableFile; - private WritableFile writableFile; + private Storage storage; + private SignalSerializer serializer; + private AtomicLong currentTimeMillis; + private static final SignalDeserializer DESERIALIZER = SignalDeserializer.ofLogs(); @BeforeEach - void setUp() throws IOException { - folderManager = mock(); - readableFile = mock(); - writableFile = createWritableFile(); - processing = mock(); - when(readableFile.readAndProcess(processing)).thenReturn(ReadableResult.SUCCEEDED); - storage = new Storage(folderManager, true); + void setUp() { + currentTimeMillis = new AtomicLong(0); + serializer = SignalSerializer.ofLogs(); + folderManager = FolderManager.create(destinationDir, getConfiguration(), new TestClock()); + storage = new Storage<>(folderManager); } - @Test - void whenReadingAndProcessingSuccessfully_returnSuccess() throws IOException { - when(folderManager.getReadableFile()).thenReturn(readableFile); - - assertEquals(ReadableResult.SUCCEEDED, storage.readAndProcess(processing)); - - verify(readableFile).readAndProcess(processing); - } - - @Test - void whenReadableFileProcessingFails_returnTryLater() throws IOException { - when(folderManager.getReadableFile()).thenReturn(readableFile); - when(readableFile.readAndProcess(processing)).thenReturn(TRY_LATER); - - assertEquals(TRY_LATER, storage.readAndProcess(processing)); - - verify(readableFile).readAndProcess(processing); + @AfterEach + void tearDown() throws IOException { + storage.close(); } @Test - void whenReadingMultipleTimes_reuseReader() throws IOException { - ReadableFile anotherReadable = mock(); - when(folderManager.getReadableFile()).thenReturn(readableFile).thenReturn(anotherReadable); - - assertEquals(ReadableResult.SUCCEEDED, 
storage.readAndProcess(processing)); - assertEquals(ReadableResult.SUCCEEDED, storage.readAndProcess(processing)); + void writeAndRead() throws IOException { + assertThat(write(Arrays.asList(FIRST_LOG_RECORD, SECOND_LOG_RECORD))).isTrue(); + assertThat(write(Collections.singletonList(THIRD_LOG_RECORD))).isTrue(); + assertThat(destinationDir.list()).hasSize(1); + forwardToReadTime(); - verify(readableFile, times(2)).readAndProcess(processing); - verify(folderManager, times(1)).getReadableFile(); - verifyNoInteractions(anotherReadable); - } + ReadableResult readResult = storage.readNext(DESERIALIZER); + assertNotNull(readResult); + assertThat(readResult.getContent()).containsExactly(FIRST_LOG_RECORD, SECOND_LOG_RECORD); + assertThat(destinationDir.list()).hasSize(1); - @Test - void whenWritingMultipleTimes_reuseWriter() throws IOException { - byte[] data = new byte[1]; - WritableFile anotherWriter = createWritableFile(); - when(folderManager.createWritableFile()).thenReturn(writableFile).thenReturn(anotherWriter); + // Delete result and read again + readResult.delete(); + readResult.close(); + ReadableResult readResult2 = storage.readNext(DESERIALIZER); + assertNotNull(readResult2); + assertThat(readResult2.getContent()).containsExactly(THIRD_LOG_RECORD); + assertThat(destinationDir.list()).hasSize(1); - storage.write(data); - storage.write(data); + // Read again without closing previous result + try { + storage.readNext(DESERIALIZER); + fail(); + } catch (IllegalStateException e) { + assertThat(e) + .hasMessage("You must close any previous ReadableResult before requesting a new one"); + } - verify(writableFile, times(2)).append(data); - verify(folderManager, times(1)).createWritableFile(); - verifyNoInteractions(anotherWriter); + // Read again when no more data is available (delete file) + readResult2.close(); + assertNull(storage.readNext(DESERIALIZER)); + assertThat(destinationDir.list()).isEmpty(); } @Test - void whenAttemptingToReadAfterClosed_returnFailed() 
throws IOException { + void interactionAfterClosed() throws IOException { + assertThat(write(Arrays.asList(FIRST_LOG_RECORD, SECOND_LOG_RECORD))).isTrue(); storage.close(); - assertEquals(ReadableResult.FAILED, storage.readAndProcess(processing)); - } + assertThat(destinationDir.list()).hasSize(1); + forwardToReadTime(); - @Test - void whenAttemptingToWriteAfterClosed_returnFalse() throws IOException { - storage.close(); - assertFalse(storage.write(new byte[1])); - } + // Reading + assertNull(storage.readNext(DESERIALIZER)); - @Test - void whenNoFileAvailableForReading_returnFailed() throws IOException { - assertEquals(ReadableResult.FAILED, storage.readAndProcess(processing)); + // Writing + assertThat(write(Collections.singletonList(THIRD_LOG_RECORD))).isFalse(); } @Test void whenTheReadTimeExpires_lookForNewFileToRead() throws IOException { - when(folderManager.getReadableFile()).thenReturn(readableFile).thenReturn(null); - when(readableFile.readAndProcess(processing)).thenReturn(ReadableResult.FAILED); - - storage.readAndProcess(processing); - - verify(folderManager, times(2)).getReadableFile(); - } - - @Test - void whenNoMoreLinesToRead_lookForNewFileToRead() throws IOException { - when(folderManager.getReadableFile()).thenReturn(readableFile).thenReturn(null); - when(readableFile.readAndProcess(processing)).thenReturn(ReadableResult.FAILED); - - storage.readAndProcess(processing); - - verify(folderManager, times(2)).getReadableFile(); - } - - @Test - void whenResourceClosed_lookForNewFileToRead() throws IOException { - when(folderManager.getReadableFile()).thenReturn(readableFile).thenReturn(null); - when(readableFile.readAndProcess(processing)).thenReturn(ReadableResult.FAILED); - - storage.readAndProcess(processing); - - verify(folderManager, times(2)).getReadableFile(); - } + long firstFileWriteTime = 1000; + long secondFileWriteTime = firstFileWriteTime + MAX_FILE_AGE_FOR_WRITE_MILLIS + 1; + currentTimeMillis.set(firstFileWriteTime); + 
assertThat(write(Arrays.asList(FIRST_LOG_RECORD, SECOND_LOG_RECORD))).isTrue(); - @Test - void whenEveryNewFileFoundCannotBeRead_returnContentNotAvailable() throws IOException { - when(folderManager.getReadableFile()).thenReturn(readableFile); - when(readableFile.readAndProcess(processing)).thenReturn(ReadableResult.FAILED); - - assertEquals(ReadableResult.FAILED, storage.readAndProcess(processing)); - - verify(folderManager, times(3)).getReadableFile(); - } - - @Test - void appendDataToFile() throws IOException { - when(folderManager.createWritableFile()).thenReturn(writableFile); - byte[] data = new byte[1]; - - storage.write(data); - - verify(writableFile).append(data); - } - - @Test - void whenWritingTimeoutHappens_retryWithNewFile() throws IOException { - byte[] data = new byte[1]; - WritableFile workingWritableFile = createWritableFile(); - when(folderManager.createWritableFile()) - .thenReturn(writableFile) - .thenReturn(workingWritableFile); - when(writableFile.append(data)).thenReturn(WritableResult.FAILED); - - storage.write(data); + // Forward past first file write time + currentTimeMillis.set(secondFileWriteTime); + assertThat(write(Collections.singletonList(THIRD_LOG_RECORD))).isTrue(); + assertThat(destinationDir.list()) + .containsExactlyInAnyOrder( + String.valueOf(firstFileWriteTime), String.valueOf(secondFileWriteTime)); - verify(folderManager, times(2)).createWritableFile(); - } - - @Test - void whenThereIsNoSpaceAvailableForWriting_retryWithNewFile() throws IOException { - byte[] data = new byte[1]; - WritableFile workingWritableFile = createWritableFile(); - when(folderManager.createWritableFile()) - .thenReturn(writableFile) - .thenReturn(workingWritableFile); - when(writableFile.append(data)).thenReturn(WritableResult.FAILED); + // Forward past first time read + currentTimeMillis.set(firstFileWriteTime + MAX_FILE_AGE_FOR_READ_MILLIS + 1); - storage.write(data); + // Read + ReadableResult result = storage.readNext(DESERIALIZER); + 
assertNotNull(result); + assertThat(result.getContent()).containsExactly(THIRD_LOG_RECORD); + assertThat(destinationDir.list()) + .containsExactlyInAnyOrder( + String.valueOf(firstFileWriteTime), String.valueOf(secondFileWriteTime)); - verify(folderManager, times(2)).createWritableFile(); + // Purge expired files on write + currentTimeMillis.set(50000); + assertThat(write(Collections.singletonList(FIRST_LOG_RECORD))).isTrue(); + assertThat(destinationDir.list()).containsExactly("50000"); } @Test - void whenWritingResourceIsClosed_retryWithNewFile() throws IOException { - byte[] data = new byte[1]; - WritableFile workingWritableFile = createWritableFile(); - when(folderManager.createWritableFile()) - .thenReturn(writableFile) - .thenReturn(workingWritableFile); - when(writableFile.append(data)).thenReturn(WritableResult.FAILED); - - storage.write(data); - - verify(folderManager, times(2)).createWritableFile(); - } - - @Test - void whenEveryAttemptToWriteFails_returnFalse() throws IOException { - byte[] data = new byte[1]; - when(folderManager.createWritableFile()).thenReturn(writableFile); - when(writableFile.append(data)).thenReturn(WritableResult.FAILED); - - assertFalse(storage.write(data)); - - verify(folderManager, times(3)).createWritableFile(); - } - - @Test - void whenClosing_closeWriterAndReaderIfNotNull() throws IOException { - when(folderManager.createWritableFile()).thenReturn(writableFile); - when(folderManager.getReadableFile()).thenReturn(readableFile); - storage.write(new byte[1]); - storage.readAndProcess(processing); - - storage.close(); - - verify(writableFile).close(); - verify(readableFile).close(); - } - - private static WritableFile createWritableFile() throws IOException { - WritableFile mock = mock(); - when(mock.append(any())).thenReturn(WritableResult.SUCCEEDED); - return mock; + void whenNoMoreLinesToRead_lookForNewFileToRead() throws IOException { + long firstFileWriteTime = 1000; + long secondFileWriteTime = firstFileWriteTime + 
MAX_FILE_AGE_FOR_WRITE_MILLIS + 1; + currentTimeMillis.set(firstFileWriteTime); + assertThat(write(Arrays.asList(FIRST_LOG_RECORD, SECOND_LOG_RECORD))).isTrue(); + + // Forward past first file write time + currentTimeMillis.set(secondFileWriteTime); + assertThat(write(Collections.singletonList(THIRD_LOG_RECORD))).isTrue(); + assertThat(destinationDir.list()) + .containsExactlyInAnyOrder( + String.valueOf(firstFileWriteTime), String.valueOf(secondFileWriteTime)); + + // Forward to all files read time + currentTimeMillis.set(secondFileWriteTime + MIN_FILE_AGE_FOR_READ_MILLIS); + + // Read + ReadableResult result = storage.readNext(DESERIALIZER); + assertNotNull(result); + assertThat(result.getContent()).containsExactly(FIRST_LOG_RECORD, SECOND_LOG_RECORD); + assertThat(destinationDir.list()) + .containsExactlyInAnyOrder( + String.valueOf(firstFileWriteTime), String.valueOf(secondFileWriteTime)); + result.delete(); + result.close(); + + // Read again + ReadableResult result2 = storage.readNext(DESERIALIZER); + assertNotNull(result2); + assertThat(result2.getContent()).containsExactly(THIRD_LOG_RECORD); + assertThat(destinationDir.list()).containsExactly(String.valueOf(secondFileWriteTime)); + result2.close(); + } + + @Test + void deleteFilesWithCorruptedData() throws IOException { + // Add files with invalid data + Files.write( + new File(destinationDir, "1000").toPath(), "random data".getBytes(StandardCharsets.UTF_8)); + Files.write( + new File(destinationDir, "2000").toPath(), "random data".getBytes(StandardCharsets.UTF_8)); + Files.write( + new File(destinationDir, "3000").toPath(), "random data".getBytes(StandardCharsets.UTF_8)); + Files.write( + new File(destinationDir, "4000").toPath(), "random data".getBytes(StandardCharsets.UTF_8)); + + // Set time ready to read all files + currentTimeMillis.set(4000 + MIN_FILE_AGE_FOR_READ_MILLIS); + + // Read + assertNull(storage.readNext(DESERIALIZER)); + assertThat(destinationDir.list()).containsExactly("4000"); // it 
tries 3 times max per call. + } + + private void forwardToReadTime() { + forwardCurrentTimeByMillis(MIN_FILE_AGE_FOR_READ_MILLIS); + } + + private void forwardCurrentTimeByMillis(long millis) { + currentTimeMillis.set(currentTimeMillis.get() + millis); + } + + private boolean write(Collection items) throws IOException { + serializer.initialize(items); + try { + return storage.write(serializer); + } finally { + serializer.reset(); + } + } + + private class TestClock implements Clock { + + @Override + public long now() { + return TimeUnit.MILLISECONDS.toNanos(currentTimeMillis.get()); + } + + @Override + public long nanoTime() { + return 0; + } } } diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/storage/TestData.java b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/storage/TestData.java index a9a2003ae..3b51125b6 100644 --- a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/storage/TestData.java +++ b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/storage/TestData.java @@ -5,34 +5,78 @@ package io.opentelemetry.contrib.disk.buffering.internal.storage; -import io.opentelemetry.contrib.disk.buffering.config.StorageConfiguration; -import io.opentelemetry.contrib.disk.buffering.config.TemporaryFileProvider; -import io.opentelemetry.contrib.disk.buffering.internal.files.DefaultTemporaryFileProvider; -import java.io.File; +import io.opentelemetry.api.common.Value; +import io.opentelemetry.api.logs.Severity; +import io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.logs.models.LogRecordDataImpl; +import io.opentelemetry.contrib.disk.buffering.storage.impl.FileStorageConfiguration; +import io.opentelemetry.sdk.logs.data.LogRecordData; public final class TestData { + public static final LogRecordData FIRST_LOG_RECORD = + LogRecordDataImpl.builder() + 
.setResource(io.opentelemetry.contrib.disk.buffering.testutils.TestData.RESOURCE_FULL) + .setSpanContext(io.opentelemetry.contrib.disk.buffering.testutils.TestData.SPAN_CONTEXT) + .setInstrumentationScopeInfo( + io.opentelemetry.contrib.disk.buffering.testutils.TestData + .INSTRUMENTATION_SCOPE_INFO_FULL) + .setAttributes(io.opentelemetry.contrib.disk.buffering.testutils.TestData.ATTRIBUTES) + .setBodyValue(Value.of("First log body")) + .setSeverity(Severity.DEBUG) + .setSeverityText("Log severity text") + .setTimestampEpochNanos(100L) + .setObservedTimestampEpochNanos(200L) + .setTotalAttributeCount(3) + .setEventName("") + .build(); + + public static final LogRecordData SECOND_LOG_RECORD = + LogRecordDataImpl.builder() + .setResource(io.opentelemetry.contrib.disk.buffering.testutils.TestData.RESOURCE_FULL) + .setSpanContext(io.opentelemetry.contrib.disk.buffering.testutils.TestData.SPAN_CONTEXT) + .setInstrumentationScopeInfo( + io.opentelemetry.contrib.disk.buffering.testutils.TestData + .INSTRUMENTATION_SCOPE_INFO_FULL) + .setAttributes(io.opentelemetry.contrib.disk.buffering.testutils.TestData.ATTRIBUTES) + .setBodyValue(Value.of("Second log body")) + .setSeverity(Severity.DEBUG) + .setSeverityText("Log severity text") + .setTimestampEpochNanos(100L) + .setObservedTimestampEpochNanos(200L) + .setTotalAttributeCount(3) + .setEventName("event") + .build(); + + public static final LogRecordData THIRD_LOG_RECORD = + LogRecordDataImpl.builder() + .setResource(io.opentelemetry.contrib.disk.buffering.testutils.TestData.RESOURCE_FULL) + .setSpanContext(io.opentelemetry.contrib.disk.buffering.testutils.TestData.SPAN_CONTEXT) + .setInstrumentationScopeInfo( + io.opentelemetry.contrib.disk.buffering.testutils.TestData + .INSTRUMENTATION_SCOPE_INFO_FULL) + .setAttributes(io.opentelemetry.contrib.disk.buffering.testutils.TestData.ATTRIBUTES) + .setBodyValue(Value.of("Third log body")) + .setSeverity(Severity.DEBUG) + .setSeverityText("Log severity text") + 
.setTimestampEpochNanos(100L) + .setObservedTimestampEpochNanos(200L) + .setTotalAttributeCount(3) + .setEventName("") + .build(); + public static final long MAX_FILE_AGE_FOR_WRITE_MILLIS = 1000; public static final long MIN_FILE_AGE_FOR_READ_MILLIS = MAX_FILE_AGE_FOR_WRITE_MILLIS + 500; public static final long MAX_FILE_AGE_FOR_READ_MILLIS = 10_000; - public static final int MAX_FILE_SIZE = 100; - public static final int MAX_FOLDER_SIZE = 300; - - public static StorageConfiguration getDefaultConfiguration(File rootDir) { - TemporaryFileProvider fileProvider = DefaultTemporaryFileProvider.getInstance(); - return getConfiguration(fileProvider, rootDir); - } + public static final int MAX_FILE_SIZE = 2000; + public static final int MAX_FOLDER_SIZE = 6000; - public static StorageConfiguration getConfiguration( - TemporaryFileProvider fileProvider, File rootDir) { - return StorageConfiguration.builder() - .setRootDir(rootDir) + public static FileStorageConfiguration getConfiguration() { + return FileStorageConfiguration.builder() .setMaxFileAgeForWriteMillis(MAX_FILE_AGE_FOR_WRITE_MILLIS) .setMinFileAgeForReadMillis(MIN_FILE_AGE_FOR_READ_MILLIS) .setMaxFileAgeForReadMillis(MAX_FILE_AGE_FOR_READ_MILLIS) .setMaxFileSize(MAX_FILE_SIZE) .setMaxFolderSize(MAX_FOLDER_SIZE) - .setTemporaryFileProvider(fileProvider) .build(); } diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/ReadableFileTest.java b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/ReadableFileTest.java index dd8cb02aa..791d80faa 100644 --- a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/ReadableFileTest.java +++ b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/ReadableFileTest.java @@ -5,35 +5,30 @@ package io.opentelemetry.contrib.disk.buffering.internal.storage.files; +import static 
io.opentelemetry.contrib.disk.buffering.internal.storage.TestData.FIRST_LOG_RECORD; import static io.opentelemetry.contrib.disk.buffering.internal.storage.TestData.MAX_FILE_AGE_FOR_READ_MILLIS; +import static io.opentelemetry.contrib.disk.buffering.internal.storage.TestData.SECOND_LOG_RECORD; +import static io.opentelemetry.contrib.disk.buffering.internal.storage.TestData.THIRD_LOG_RECORD; import static io.opentelemetry.contrib.disk.buffering.internal.storage.TestData.getConfiguration; import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import io.opentelemetry.api.common.Value; -import io.opentelemetry.api.logs.Severity; -import io.opentelemetry.contrib.disk.buffering.config.TemporaryFileProvider; import io.opentelemetry.contrib.disk.buffering.internal.serialization.deserializers.DeserializationException; import io.opentelemetry.contrib.disk.buffering.internal.serialization.deserializers.SignalDeserializer; -import io.opentelemetry.contrib.disk.buffering.internal.serialization.mapping.logs.models.LogRecordDataImpl; import io.opentelemetry.contrib.disk.buffering.internal.serialization.serializers.SignalSerializer; -import io.opentelemetry.contrib.disk.buffering.internal.storage.files.reader.ProcessResult; -import io.opentelemetry.contrib.disk.buffering.internal.storage.responses.ReadableResult; -import io.opentelemetry.contrib.disk.buffering.testutils.TestData; import io.opentelemetry.sdk.common.Clock; import io.opentelemetry.sdk.logs.data.LogRecordData; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; 
import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.List; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -42,151 +37,54 @@ class ReadableFileTest { @TempDir File dir; private File source; - private File temporaryFile; private ReadableFile readableFile; private Clock clock; - private TemporaryFileProvider temporaryFileProvider; private static final long CREATED_TIME_MILLIS = 1000L; private static final SignalSerializer SERIALIZER = SignalSerializer.ofLogs(); private static final SignalDeserializer DESERIALIZER = SignalDeserializer.ofLogs(); - private static final LogRecordData FIRST_LOG_RECORD = - LogRecordDataImpl.builder() - .setResource(TestData.RESOURCE_FULL) - .setSpanContext(TestData.SPAN_CONTEXT) - .setInstrumentationScopeInfo(TestData.INSTRUMENTATION_SCOPE_INFO_FULL) - .setAttributes(TestData.ATTRIBUTES) - .setBodyValue(Value.of("First log body")) - .setSeverity(Severity.DEBUG) - .setSeverityText("Log severity text") - .setTimestampEpochNanos(100L) - .setObservedTimestampEpochNanos(200L) - .setTotalAttributeCount(3) - .build(); - - private static final LogRecordData SECOND_LOG_RECORD = - LogRecordDataImpl.builder() - .setResource(TestData.RESOURCE_FULL) - .setSpanContext(TestData.SPAN_CONTEXT) - .setInstrumentationScopeInfo(TestData.INSTRUMENTATION_SCOPE_INFO_FULL) - .setAttributes(TestData.ATTRIBUTES) - .setBodyValue(Value.of("Second log body")) - .setSeverity(Severity.DEBUG) - .setSeverityText("Log severity text") - .setTimestampEpochNanos(100L) - .setObservedTimestampEpochNanos(200L) - .setTotalAttributeCount(3) - .build(); - - private static final LogRecordData THIRD_LOG_RECORD = - LogRecordDataImpl.builder() - .setResource(TestData.RESOURCE_FULL) - .setSpanContext(TestData.SPAN_CONTEXT) - .setInstrumentationScopeInfo(TestData.INSTRUMENTATION_SCOPE_INFO_FULL) - 
.setAttributes(TestData.ATTRIBUTES) - .setBodyValue(Value.of("Third log body")) - .setSeverity(Severity.DEBUG) - .setSeverityText("Log severity text") - .setTimestampEpochNanos(100L) - .setObservedTimestampEpochNanos(200L) - .setTotalAttributeCount(3) - .build(); @BeforeEach void setUp() throws IOException { source = new File(dir, "sourceFile"); - temporaryFile = new File(dir, "temporaryFile"); addFileContents(source); - temporaryFileProvider = mock(); - when(temporaryFileProvider.createTemporaryFile(anyString())).thenReturn(temporaryFile); clock = mock(); - readableFile = - new ReadableFile( - source, CREATED_TIME_MILLIS, clock, getConfiguration(temporaryFileProvider, dir)); + readableFile = new ReadableFile(source, CREATED_TIME_MILLIS, clock, getConfiguration()); } - private static void addFileContents(File source) throws IOException { - List items = new ArrayList<>(); - items.add(SERIALIZER.serialize(Collections.singleton(FIRST_LOG_RECORD))); - items.add(SERIALIZER.serialize(Collections.singleton(SECOND_LOG_RECORD))); - items.add(SERIALIZER.serialize(Collections.singleton(THIRD_LOG_RECORD))); + @AfterEach + void tearDown() throws IOException { + readableFile.close(); + } + private static void addFileContents(File source) throws IOException { try (FileOutputStream out = new FileOutputStream(source)) { - for (byte[] item : items) { - out.write(item); + for (LogRecordData item : + Arrays.asList(FIRST_LOG_RECORD, SECOND_LOG_RECORD, THIRD_LOG_RECORD)) { + SERIALIZER.initialize(Collections.singleton(item)); + SERIALIZER.writeBinaryTo(out); + SERIALIZER.reset(); } } } @Test - void readSingleItemAndRemoveIt() throws IOException { - readableFile.readAndProcess( - bytes -> { - assertEquals(FIRST_LOG_RECORD, deserialize(bytes)); - return ProcessResult.SUCCEEDED; - }); - - List logs = getRemainingDataAndClose(readableFile); - - assertEquals(2, logs.size()); - assertEquals(SECOND_LOG_RECORD, logs.get(0)); - assertEquals(THIRD_LOG_RECORD, logs.get(1)); - } - - @Test - void 
whenProcessingSucceeds_returnSuccessStatus() throws IOException { - assertEquals( - ReadableResult.SUCCEEDED, readableFile.readAndProcess(bytes -> ProcessResult.SUCCEEDED)); - } - - @Test - void whenProcessingFails_returnTryLaterStatus() throws IOException { - assertEquals( - ReadableResult.TRY_LATER, readableFile.readAndProcess(bytes -> ProcessResult.TRY_LATER)); - } - - @Test - void deleteTemporaryFileWhenClosing() throws IOException { - readableFile.readAndProcess(bytes -> ProcessResult.SUCCEEDED); - readableFile.close(); - - assertFalse(temporaryFile.exists()); - } - - @Test - void readMultipleLinesAndRemoveThem() throws IOException { - readableFile.readAndProcess(bytes -> ProcessResult.SUCCEEDED); - readableFile.readAndProcess(bytes -> ProcessResult.SUCCEEDED); + void readAndRemoveItems() throws IOException { + assertThat(FIRST_LOG_RECORD).isEqualTo(deserialize(readableFile.readNext())); + readableFile.removeTopItem(); List logs = getRemainingDataAndClose(readableFile); - assertEquals(1, logs.size()); - assertEquals(THIRD_LOG_RECORD, logs.get(0)); - } - - @Test - void whenConsumerReturnsFalse_doNotRemoveLineFromSource() throws IOException { - readableFile.readAndProcess(bytes -> ProcessResult.TRY_LATER); - - List logs = getRemainingDataAndClose(readableFile); - - assertEquals(3, logs.size()); + assertThat(2).isEqualTo(logs.size()); + assertThat(SECOND_LOG_RECORD).isEqualTo(logs.get(0)); + assertThat(THIRD_LOG_RECORD).isEqualTo(logs.get(1)); } @Test void whenReadingLastLine_deleteOriginalFile_and_close() throws IOException { getRemainingDataAndClose(readableFile); - assertFalse(source.exists()); - assertTrue(readableFile.isClosed()); - } - - @Test - void whenTheFileContentIsInvalid_deleteOriginalFile_and_close() throws IOException { - assertEquals( - ReadableResult.FAILED, readableFile.readAndProcess(bytes -> ProcessResult.CONTENT_INVALID)); - - assertFalse(source.exists()); - assertTrue(readableFile.isClosed()); + assertThat(source.exists()).isFalse(); + 
assertThat(readableFile.isClosed()).isTrue(); } @Test @@ -198,50 +96,38 @@ void whenNoMoreLinesAvailableToRead_deleteOriginalFile_close_and_returnNoContent } ReadableFile emptyReadableFile = - new ReadableFile( - emptyFile, CREATED_TIME_MILLIS, clock, getConfiguration(temporaryFileProvider, dir)); + new ReadableFile(emptyFile, CREATED_TIME_MILLIS, clock, getConfiguration()); - assertEquals( - ReadableResult.FAILED, emptyReadableFile.readAndProcess(bytes -> ProcessResult.SUCCEEDED)); + assertThat(emptyReadableFile.readNext()).isNull(); - assertTrue(emptyReadableFile.isClosed()); - assertFalse(emptyFile.exists()); + assertThat(emptyReadableFile.isClosed()).isTrue(); + assertThat(emptyFile.exists()).isFalse(); } @Test - void - whenReadingAfterTheConfiguredReadingTimeExpired_deleteOriginalFile_close_and_returnFileExpiredException() - throws IOException { - readableFile.readAndProcess(bytes -> ProcessResult.SUCCEEDED); + void whenReadingAfterTheConfiguredReadingTimeExpired_close() throws IOException { when(clock.now()) .thenReturn(MILLISECONDS.toNanos(CREATED_TIME_MILLIS + MAX_FILE_AGE_FOR_READ_MILLIS)); - assertEquals( - ReadableResult.FAILED, readableFile.readAndProcess(bytes -> ProcessResult.SUCCEEDED)); - - assertTrue(readableFile.isClosed()); + assertThat(readableFile.readNext()).isNull(); + assertThat(readableFile.isClosed()).isTrue(); } @Test - void whenReadingAfterClosed_returnFailedStatus() throws IOException { - readableFile.readAndProcess(bytes -> ProcessResult.SUCCEEDED); + void whenReadingAfterClosed_returnNull() throws IOException { readableFile.close(); - assertEquals( - ReadableResult.FAILED, readableFile.readAndProcess(bytes -> ProcessResult.SUCCEEDED)); + assertThat(readableFile.readNext()).isNull(); } private static List getRemainingDataAndClose(ReadableFile readableFile) throws IOException { List result = new ArrayList<>(); - ReadableResult readableResult = ReadableResult.SUCCEEDED; - while (readableResult == ReadableResult.SUCCEEDED) { - 
readableResult = - readableFile.readAndProcess( - bytes -> { - result.add(deserialize(bytes)); - return ProcessResult.SUCCEEDED; - }); + byte[] bytes = readableFile.readNext(); + while (bytes != null) { + result.add(deserialize(bytes)); + readableFile.removeTopItem(); + bytes = readableFile.readNext(); } readableFile.close(); diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/WritableFileTest.java b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/WritableFileTest.java index 8ff749c1e..2f3d408d6 100644 --- a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/WritableFileTest.java +++ b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/WritableFileTest.java @@ -8,12 +8,11 @@ import static io.opentelemetry.contrib.disk.buffering.internal.storage.TestData.MAX_FILE_AGE_FOR_WRITE_MILLIS; import static io.opentelemetry.contrib.disk.buffering.internal.storage.TestData.MAX_FILE_SIZE; import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import io.opentelemetry.contrib.disk.buffering.internal.serialization.serializers.ByteArraySerializer; import io.opentelemetry.contrib.disk.buffering.internal.storage.TestData; import io.opentelemetry.contrib.disk.buffering.internal.storage.responses.WritableResult; import io.opentelemetry.sdk.common.Clock; @@ -22,6 +21,7 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.util.List; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import 
org.junit.jupiter.api.io.TempDir; @@ -43,67 +43,76 @@ void setUp() throws IOException { new WritableFile( new File(rootDir, String.valueOf(CREATED_TIME_MILLIS)), CREATED_TIME_MILLIS, - TestData.getDefaultConfiguration(rootDir), + TestData.getConfiguration(), clock); } + @AfterEach + void tearDown() throws IOException { + writableFile.close(); + } + @Test void hasNotExpired_whenWriteAgeHasNotExpired() { when(clock.now()).thenReturn(MILLISECONDS.toNanos(1500L)); - assertFalse(writableFile.hasExpired()); + assertThat(writableFile.hasExpired()).isFalse(); } @Test void hasExpired_whenWriteAgeHasExpired() { when(clock.now()).thenReturn(MILLISECONDS.toNanos(2000L)); - assertTrue(writableFile.hasExpired()); + assertThat(writableFile.hasExpired()).isTrue(); } @Test void appendDataInNewLines_andIncreaseSize() throws IOException { byte[] line1 = getByteArrayLine("First line"); byte[] line2 = getByteArrayLine("Second line"); - writableFile.append(line1); - writableFile.append(line2); + writableFile.append(new ByteArraySerializer(line1)); + writableFile.append(new ByteArraySerializer(line2)); writableFile.close(); List lines = getWrittenLines(); - assertEquals(2, lines.size()); - assertEquals("First line", lines.get(0)); - assertEquals("Second line", lines.get(1)); - assertEquals(line1.length + line2.length, writableFile.getSize()); + assertThat(lines).hasSize(2); + assertThat(lines.get(0)).isEqualTo("First line"); + assertThat(lines.get(1)).isEqualTo("Second line"); + assertThat(writableFile.getSize()).isEqualTo(line1.length + line2.length); } @Test void whenAppendingData_andNotEnoughSpaceIsAvailable_closeAndReturnFailed() throws IOException { - assertEquals(WritableResult.SUCCEEDED, writableFile.append(new byte[MAX_FILE_SIZE])); + assertThat(writableFile.append(new ByteArraySerializer(new byte[MAX_FILE_SIZE]))) + .isEqualTo(WritableResult.SUCCEEDED); - assertEquals(WritableResult.FAILED, writableFile.append(new byte[1])); + assertThat(writableFile.append(new 
ByteArraySerializer(new byte[1]))) + .isEqualTo(WritableResult.FAILED); - assertEquals(1, getWrittenLines().size()); - assertEquals(MAX_FILE_SIZE, writableFile.getSize()); + assertThat(getWrittenLines()).hasSize(1); + assertThat(writableFile.getSize()).isEqualTo(MAX_FILE_SIZE); } @Test void whenAppendingData_andHasExpired_closeAndReturnExpiredStatus() throws IOException { - writableFile.append(new byte[2]); + writableFile.append(new ByteArraySerializer(new byte[2])); when(clock.now()) .thenReturn(MILLISECONDS.toNanos(CREATED_TIME_MILLIS + MAX_FILE_AGE_FOR_WRITE_MILLIS)); - assertEquals(WritableResult.FAILED, writableFile.append(new byte[1])); + assertThat(writableFile.append(new ByteArraySerializer(new byte[1]))) + .isEqualTo(WritableResult.FAILED); - assertEquals(1, getWrittenLines().size()); + assertThat(getWrittenLines()).hasSize(1); } @Test void whenAppendingData_andIsAlreadyClosed_returnFailedStatus() throws IOException { - writableFile.append(new byte[1]); + writableFile.append(new ByteArraySerializer(new byte[1])); writableFile.close(); - assertEquals(WritableResult.FAILED, writableFile.append(new byte[2])); + assertThat(writableFile.append(new ByteArraySerializer(new byte[2]))) + .isEqualTo(WritableResult.FAILED); } private static byte[] getByteArrayLine(String line) { diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/utils/FileStreamTest.java b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/utils/FileStreamTest.java new file mode 100644 index 000000000..c2ad06f28 --- /dev/null +++ b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/internal/storage/files/utils/FileStreamTest.java @@ -0,0 +1,82 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.disk.buffering.internal.storage.files.utils; + +import static org.assertj.core.api.Assertions.assertThat; + +import 
java.io.File; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.StandardOpenOption; +import org.jetbrains.annotations.NotNull; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; + +class FileStreamTest { + @TempDir File dir; + + @Test + void truncateTop() throws IOException { + String initialText = "1,2,3,4,5"; + byte[] readBuffer; + File temporaryFile = new File(dir, "temporaryFile"); + writeString(temporaryFile, initialText); + + FileStream stream = FileStream.create(temporaryFile); + + // Assert initial size + assertThat(stream.size()).isEqualTo(9); + + assertThat((char) stream.read()).asString().isEqualTo("1"); + assertThat(readString(temporaryFile)).isEqualTo(initialText); + assertThat(stream.size()).isEqualTo(9); + + // Truncate until current position + stream.truncateTop(); + assertThat(readString(temporaryFile)).isEqualTo(",2,3,4,5"); + assertThat(stream.size()).isEqualTo(8); + + // Truncate fixed size from the top + stream.truncateTop(3); + + // Ensure that the changes are made before closing the stream. + assertThat(readString(temporaryFile)).isEqualTo("3,4,5"); + assertThat(stream.size()).isEqualTo(5); + + // Truncate again + readBuffer = new byte[3]; + stream.read(readBuffer); + assertThat(readBuffer).asString().isEqualTo("3,4"); + assertThat(stream.size()).isEqualTo(5); + + stream.truncateTop(2); + + // Ensure that the changes are made before closing the stream. + assertThat(readString(temporaryFile)).isEqualTo("4,5"); + assertThat(stream.size()).isEqualTo(3); + + // Truncate all available data + stream.truncateTop(3); + assertThat(stream).isEmpty(); + assertThat(readString(temporaryFile)).isEqualTo(""); + + stream.close(); + + // Ensure that the changes are kept after closing the stream. 
+ assertThat(readString(temporaryFile)).isEqualTo(""); + } + + private static void writeString(File file, String text) throws IOException { + Files.write( + file.toPath(), text.getBytes(StandardCharsets.UTF_8), StandardOpenOption.CREATE_NEW); + } + + @NotNull + private static String readString(File file) throws IOException { + return new String(Files.readAllBytes(file.toPath()), StandardCharsets.UTF_8); + } +} diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/testutils/BaseSignalSerializerTest.java b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/testutils/BaseSignalSerializerTest.java index d413c4aa6..d113d9f22 100644 --- a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/testutils/BaseSignalSerializerTest.java +++ b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/testutils/BaseSignalSerializerTest.java @@ -12,6 +12,7 @@ import io.opentelemetry.contrib.disk.buffering.internal.storage.files.reader.DelimitedProtoStreamReader; import io.opentelemetry.contrib.disk.buffering.internal.storage.files.reader.StreamReader; import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; import java.io.IOException; import java.util.Arrays; import java.util.List; @@ -20,13 +21,23 @@ @SuppressWarnings("unchecked") public abstract class BaseSignalSerializerTest { protected byte[] serialize(SIGNAL_SDK_ITEM... 
items) { - return getSerializer().serialize(Arrays.asList(items)); + ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); + SignalSerializer serializer = getSerializer(); + try { + serializer.initialize(Arrays.asList(items)); + serializer.writeBinaryTo(byteArrayOutputStream); + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + serializer.reset(); + } + return byteArrayOutputStream.toByteArray(); } protected List deserialize(byte[] source) { try (ByteArrayInputStream in = new ByteArrayInputStream(source)) { StreamReader streamReader = DelimitedProtoStreamReader.Factory.getInstance().create(in); - return getDeserializer().deserialize(Objects.requireNonNull(streamReader.read()).content); + return getDeserializer().deserialize(Objects.requireNonNull(streamReader.readNext())); } catch (IOException e) { throw new RuntimeException(e); } diff --git a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/testutils/TestData.java b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/testutils/TestData.java index 15c32c421..0da28971c 100644 --- a/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/testutils/TestData.java +++ b/disk-buffering/src/test/java/io/opentelemetry/contrib/disk/buffering/testutils/TestData.java @@ -37,6 +37,8 @@ public final class TestData { .put("conditions", false, true) .put("scores", 0L, 1L) .put("coins", 0.01, 0.05, 0.1) + .put("empty", "") + .put("blank", " ") .build(); public static final Resource RESOURCE_FULL = @@ -131,5 +133,24 @@ private static LongExemplarData makeLongExemplarData(TraceFlags flags) { return ImmutableLongExemplarData.create(ATTRIBUTES, 100L, context, 1L); } + @NotNull + public static byte[] makeTooShortSignalBinary() { + return new byte[] { + (byte) 0x0A, // type + (byte) 0xFF, // defining length 255, but message is shorter + (byte) 0x01 // content + }; + } + + @NotNull + public static byte[] makeMalformedSignalBinary() { + return 
new byte[] { + (byte) 0x0A, // type + (byte) 0x02, // length + (byte) 0x08, // field 1, wire type 0 (varint) - this should be a nested message but isn't + (byte) 0x01 // content + }; + } + private TestData() {} } diff --git a/docs/apidiffs/1.50.0_vs_1.49.0/opentelemetry-aws-xray.txt b/docs/apidiffs/1.50.0_vs_1.49.0/opentelemetry-aws-xray.txt new file mode 100644 index 000000000..674d298c5 --- /dev/null +++ b/docs/apidiffs/1.50.0_vs_1.49.0/opentelemetry-aws-xray.txt @@ -0,0 +1,2 @@ +Comparing source compatibility of opentelemetry-aws-xray-1.50.0.jar against opentelemetry-aws-xray-1.49.0.jar +No changes. \ No newline at end of file diff --git a/docs/apidiffs/current_vs_latest/opentelemetry-aws-xray.txt b/docs/apidiffs/current_vs_latest/opentelemetry-aws-xray.txt new file mode 100644 index 000000000..6c4fe5eda --- /dev/null +++ b/docs/apidiffs/current_vs_latest/opentelemetry-aws-xray.txt @@ -0,0 +1,2 @@ +Comparing source compatibility of opentelemetry-aws-xray-1.51.0-SNAPSHOT.jar against opentelemetry-aws-xray-1.50.0.jar +No changes. \ No newline at end of file diff --git a/docs/style-guide.md b/docs/style-guide.md new file mode 100644 index 000000000..9acf641ae --- /dev/null +++ b/docs/style-guide.md @@ -0,0 +1,166 @@ +# Style Guide + +This project follows the +[Google Java Style Guide](https://google.github.io/styleguide/javaguide.html). + +## Code Formatting + +### Auto-formatting + +The build will fail if source code is not formatted according to Google Java Style. + +Run the following command to reformat all files: + +```bash +./gradlew spotlessApply +``` + +For IntelliJ users, an `.editorconfig` file is provided that IntelliJ will automatically use to +adjust code formatting settings. However, it does not support all required rules, so you may still +need to run `./gradlew spotlessApply` periodically. 
+ +### Static imports + +Consider statically importing the following commonly used methods and constants: + +- **Test methods** + - `io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.*` + - `org.assertj.core.api.Assertions.*` + - `org.mockito.Mockito.*` + - `org.mockito.ArgumentMatchers.*` +- **Utility methods** + - `io.opentelemetry.api.common.AttributeKey.*` + - `java.util.Arrays` - asList, stream + - `java.util.Collections` - singleton*, empty*, unmodifiable*, synchronized*, checked* + - `java.util.Objects` - requireNonNull + - `java.util.function.Function` - identity + - `java.util.stream.Collectors.*` +- **Utility constants** + - `java.util.Locale.*` + - `java.util.concurrent.TimeUnit.*` + - `java.util.logging.Level.*` + - `java.nio.charset.StandardCharsets.*` +- **OpenTelemetry semantic convention constants** + - All constants under `io.opentelemetry.semconv.**`, except for + `io.opentelemetry.semconv.SchemaUrls.*` constants. + +### Class organization + +Prefer this order: + +- Static fields (final before non-final) +- Instance fields (final before non-final) +- Constructors +- Methods +- Nested classes + +**Method ordering**: Place calling methods above the methods they call. For example, place private +methods below the non-private methods that use them. + +**Static utility classes**: Place the private constructor (used to prevent instantiation) after all +methods. + +## Java Language Conventions + +### Visibility modifiers + +Follow the principle of minimal necessary visibility. Use the most restrictive access modifier that +still allows the code to function correctly. + +### Internal packages + +Classes in `.internal` packages are not considered public API and may change without notice. These +packages contain implementation details that should not be used by external consumers. 
+ +- Use `.internal` packages for implementation classes that need to be public within the module but + should not be used externally +- Try to avoid referencing `.internal` classes from other modules + +### `final` keyword usage + +Public non-internal non-test classes should be declared `final` where possible. + +Methods should only be declared `final` if they are in public non-internal non-test non-final classes. + +Fields should be declared `final` where possible. + +Method parameters and local variables should never be declared `final`. + +### `@Nullable` annotation usage + +**Note: This section is aspirational and may not reflect the current codebase.** + +Annotate all parameters and fields that can be `null` with `@Nullable` (specifically +`javax.annotation.Nullable`, which is included by the `otel.java-conventions` Gradle plugin as a +`compileOnly` dependency). + +`@NonNull` is unnecessary as it is the default. + +**Defensive programming**: Public APIs should still check for `null` parameters even if not +annotated with `@Nullable`. Internal APIs do not need these checks. + +### `Optional` usage + +Following the reasoning from +[Writing a Java library with better experience (slide 12)](https://speakerdeck.com/trustin/writing-a-java-library-with-better-experience?slide=12), +`java.util.Optional` usage is kept to a minimum. + +**Guidelines**: + +- `Optional` shouldn't appear in public API signatures +- Avoid `Optional` on the hot path (instrumentation code), unless the instrumented library uses it + +## Tooling conventions + +### AssertJ + +Prefer AssertJ assertions over JUnit assertions (assertEquals, assertTrue, etc.) for better error +messages. + +### JUnit + +Test classes and test methods should generally be package-protected (no explicit visibility +modifier) rather than `public`. This follows the principle of minimal necessary visibility and is +sufficient for JUnit to discover and execute tests. 
+ +### Gradle + +- Use Kotlin instead of Groovy for build scripts +- Plugin versions should be specified in `settings.gradle.kts`, not in individual modules +- All modules should use `plugins { id("otel.java-conventions") }` +- Set module names with `otelJava.moduleName.set("io.opentelemetry.contrib.mymodule")` + +## Configuration + +- Use `otel.` prefix for all configuration property keys +- Read configuration via the `ConfigProperties` interface +- Provide sensible defaults and document all options +- Validate configuration early with clear error messages + +## Performance + +Avoid allocations on the hot path (instrumentation code) whenever possible. This includes `Iterator` +allocations from collections; note that `for (SomeType t : plainJavaArray)` does not allocate an +iterator object. + +Non-allocating Stream API usage on the hot path is acceptable but may not fit the surrounding code +style; this is a judgment call. Some Stream APIs make efficient allocation difficult (e.g., +`collect` with pre-sized sink data structures involves convoluted `Supplier` code, or lambdas passed +to `forEach` may be capturing/allocating lambdas). + +## Documentation + +### Component README files + +- Include a component owners section in each module's README +- Document configuration options with examples + +### Deprecation and breaking changes + +Breaking changes are allowed in unstable modules (published with `-alpha` version suffix). + +1. Mark APIs with `@Deprecated` and a removal timeline (there must be at least one release with the + API marked as deprecated before removing it) +2. Document the replacement in Javadoc with `@deprecated` tag +3. 
Note the migration path for breaking changes under a "Migration notes" section of CHANGELOG.md + (create this section at the top of the Unreleased section if not already present) diff --git a/example/README.md b/example/README.md deleted file mode 100644 index 43333d2c2..000000000 --- a/example/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Example Library - -This is an example library intended to be used as a template for easy additions to the OpenTelemetry Java Contrib project. diff --git a/example/build.gradle.kts b/example/build.gradle.kts deleted file mode 100644 index 898191643..000000000 --- a/example/build.gradle.kts +++ /dev/null @@ -1,13 +0,0 @@ -plugins { - id("otel.java-conventions") -} - -description = "An example OpenTelemetry Java Contrib library" - -tasks { - jar { - manifest { - attributes["Main-Class"] = "io.opentelemetry.contrib.example.Library" - } - } -} diff --git a/example/src/main/java/io/opentelemetry/contrib/example/Library.java b/example/src/main/java/io/opentelemetry/contrib/example/Library.java deleted file mode 100644 index 289f72ea8..000000000 --- a/example/src/main/java/io/opentelemetry/contrib/example/Library.java +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.example; - -public class Library { - - public boolean myMethod() { - return true; - } - - @SuppressWarnings("SystemOut") - public static void main(String... 
args) { - System.out.println("ExampleLibrary.main"); - } -} diff --git a/example/src/test/java/io/opentelemetry/contrib/example/LibraryTest.java b/example/src/test/java/io/opentelemetry/contrib/example/LibraryTest.java deleted file mode 100644 index a9c897576..000000000 --- a/example/src/test/java/io/opentelemetry/contrib/example/LibraryTest.java +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.example; - -import static org.assertj.core.api.Assertions.assertThat; - -import org.junit.jupiter.api.Test; - -class LibraryTest { - - @Test - void myMethod() { - Library library = new Library(); - assertThat(library.myMethod()).isTrue(); - } -} diff --git a/gcp-auth-extension/README.md b/gcp-auth-extension/README.md index bb2c32886..209283daa 100644 --- a/gcp-auth-extension/README.md +++ b/gcp-auth-extension/README.md @@ -43,10 +43,14 @@ Here is a list of required and optional configuration available for the extensio #### Optional Config -- `GOOGLE_CLOUD_QUOTA_PROJECT`: Environment variable that represents the Google Cloud Quota Project ID which will be charged for the GCP API usage. To learn more about a *quota project*, see [here](https://cloud.google.com/docs/quotas/quota-project). Additional details about configuring the *quota project* can be found [here](https://cloud.google.com/docs/quotas/set-quota-project). +- `GOOGLE_CLOUD_QUOTA_PROJECT`: Environment variable that represents the Google Cloud Quota Project ID which will be charged for the GCP API usage. To learn more about a *quota project*, see the [Quota project overview](https://cloud.google.com/docs/quotas/quota-project) page. Additional details about configuring the *quota project* can be found on the [Set the quota project](https://cloud.google.com/docs/quotas/set-quota-project) page. - Can also be configured using `google.cloud.quota.project` system property. 
+- `GOOGLE_OTEL_AUTH_TARGET_SIGNALS`: Environment variable that specifies a comma-separated list of OpenTelemetry signals for which this authentication extension should be active. Valid values contain - `metrics`, `traces` or `all`. If left unspecified, `all` is assumed meaning the extension will attempt to apply authentication to exports for all signals. + + - Can also be configured using `google.otel.auth.target.signals` system property. + ## Usage ### With OpenTelemetry Java agent diff --git a/gcp-auth-extension/build.gradle.kts b/gcp-auth-extension/build.gradle.kts index 112113e66..d7e99ad30 100644 --- a/gcp-auth-extension/build.gradle.kts +++ b/gcp-auth-extension/build.gradle.kts @@ -1,8 +1,7 @@ plugins { id("otel.java-conventions") id("otel.publish-conventions") - id("com.github.johnrengelman.shadow") - id("org.springframework.boot") version "2.7.18" + id("com.gradleup.shadow") } description = "OpenTelemetry extension that provides GCP authentication support for OTLP exporters" @@ -14,6 +13,8 @@ val agent: Configuration by configurations.creating { } dependencies { + implementation(platform("org.springframework.boot:spring-boot-dependencies:2.7.18")) + annotationProcessor("com.google.auto.service:auto-service") // We use `compileOnly` dependency because during runtime all necessary classes are provided by // javaagent itself. 
@@ -23,7 +24,7 @@ dependencies { compileOnly("io.opentelemetry:opentelemetry-exporter-otlp") // Only dependencies added to `implementation` configuration will be picked up by Shadow plugin - implementation("com.google.auth:google-auth-library-oauth2-http:1.33.1") + implementation("com.google.auth:google-auth-library-oauth2-http:1.39.1") // Test dependencies testCompileOnly("com.google.auto.service:auto-service-annotations") @@ -41,7 +42,7 @@ dependencies { testImplementation("org.mockito:mockito-inline") testImplementation("org.mockito:mockito-junit-jupiter") testImplementation("org.mock-server:mockserver-netty:5.15.0") - testImplementation("io.opentelemetry.proto:opentelemetry-proto:1.5.0-alpha") + testImplementation("io.opentelemetry.proto:opentelemetry-proto:1.8.0-alpha") testImplementation("org.springframework.boot:spring-boot-starter-web:2.7.18") testImplementation("org.springframework.boot:spring-boot-starter:2.7.18") testImplementation("org.springframework.boot:spring-boot-starter-test:2.7.18") @@ -55,6 +56,9 @@ dependencies { tasks { test { useJUnitPlatform() + // Unset relevant environment variables to provide a clean state for the tests + environment("GOOGLE_CLOUD_PROJECT", "") + environment("GOOGLE_CLOUD_QUOTA_PROJECT", "") // exclude integration test exclude("io/opentelemetry/contrib/gcp/auth/GcpAuthExtensionEndToEndTest.class") } @@ -85,11 +89,6 @@ tasks { assemble { dependsOn(shadowJar) } - - bootJar { - // disable bootJar in build since it only runs as part of test - enabled = false - } } val builtLibsDir = layout.buildDirectory.dir("libs").get().asFile.absolutePath @@ -103,7 +102,10 @@ tasks.register("copyAgent") { }) } -tasks.register("IntegrationTest") { +tasks.register("IntegrationTestUserCreds") { + testClassesDirs = sourceSets.test.get().output.classesDirs + classpath = sourceSets.test.get().runtimeClasspath + dependsOn(tasks.shadowJar) dependsOn(tasks.named("copyAgent")) @@ -111,7 +113,7 @@ tasks.register("IntegrationTest") { // include only 
the integration test file include("io/opentelemetry/contrib/gcp/auth/GcpAuthExtensionEndToEndTest.class") - val fakeCredsFilePath = project.file("src/test/resources/fakecreds.json").absolutePath + val fakeCredsFilePath = project.file("src/test/resources/fake_user_creds.json").absolutePath environment("GOOGLE_CLOUD_QUOTA_PROJECT", "quota-project-id") environment("GOOGLE_APPLICATION_CREDENTIALS", fakeCredsFilePath) @@ -127,6 +129,7 @@ tasks.register("IntegrationTest") { "-Dotel.metrics.exporter=none", "-Dotel.logs.exporter=none", "-Dotel.exporter.otlp.protocol=http/protobuf", - "-Dmockserver.logLevel=off" + "-Dotel.javaagent.debug=false", + "-Dmockserver.logLevel=trace" ) } diff --git a/gcp-auth-extension/src/main/java/io/opentelemetry/contrib/gcp/auth/ConfigurableOption.java b/gcp-auth-extension/src/main/java/io/opentelemetry/contrib/gcp/auth/ConfigurableOption.java index 1bf90e48f..aba0ddf11 100644 --- a/gcp-auth-extension/src/main/java/io/opentelemetry/contrib/gcp/auth/ConfigurableOption.java +++ b/gcp-auth-extension/src/main/java/io/opentelemetry/contrib/gcp/auth/ConfigurableOption.java @@ -5,15 +5,18 @@ package io.opentelemetry.contrib.gcp.auth; +import static java.util.Locale.ROOT; + +import io.opentelemetry.sdk.autoconfigure.spi.ConfigProperties; import io.opentelemetry.sdk.autoconfigure.spi.ConfigurationException; -import java.util.Locale; +import java.util.Optional; import java.util.function.Supplier; /** * An enum representing configurable options for a GCP Authentication Extension. Each option has a * user-readable name and can be configured using environment variables or system properties. */ -public enum ConfigurableOption { +enum ConfigurableOption { /** * Represents the Google Cloud Project ID option. Can be configured using the environment variable * `GOOGLE_CLOUD_PROJECT` or the system property `google.cloud.project`. 
@@ -29,7 +32,27 @@ public enum ConfigurableOption { * href="https://cloud.google.com/docs/quotas/set-quota-project">official GCP client * libraries. */ - GOOGLE_CLOUD_QUOTA_PROJECT("Google Cloud Quota Project ID"); + GOOGLE_CLOUD_QUOTA_PROJECT("Google Cloud Quota Project ID"), + + /** + * Specifies a comma-separated list of OpenTelemetry signals for which this authentication + * extension should be active. The authentication mechanisms provided by this extension will only + * be applied to the listed signals. If not set, {@code all} is assumed to be set which means + * authentication is enabled for all supported signals. + * + *

Valid signal values are: + * + *

    + *
  • {@code metrics} - Enables authentication for metric exports. + *
  • {@code traces} - Enables authentication for trace exports. + *
  • {@code all} - Enables authentication for all exports. + *
+ * + *

The values are case-sensitive. Whitespace around commas and values is ignored. Can be + * configured using the environment variable `GOOGLE_OTEL_AUTH_TARGET_SIGNALS` or the system + * property `google.otel.auth.target.signals`. + */ + GOOGLE_OTEL_AUTH_TARGET_SIGNALS("Target Signals for Google Authentication Extension"); private final String userReadableName; private final String environmentVariableName; @@ -38,8 +61,7 @@ public enum ConfigurableOption { ConfigurableOption(String userReadableName) { this.userReadableName = userReadableName; this.environmentVariableName = this.name(); - this.systemPropertyName = - this.environmentVariableName.toLowerCase(Locale.ENGLISH).replace('_', '.'); + this.systemPropertyName = this.environmentVariableName.toLowerCase(ROOT).replace('_', '.'); } /** @@ -60,6 +82,15 @@ String getSystemProperty() { return this.systemPropertyName; } + /** + * Returns the user readable name associated with this option. + * + * @return the user readable name (e.g., "Google Cloud Quota Project ID") + */ + String getUserReadableName() { + return this.userReadableName; + } + /** * Retrieves the configured value for this option. This method checks the environment variable * first and then the system property. @@ -68,14 +99,10 @@ String getSystemProperty() { * @throws ConfigurationException if neither the environment variable nor the system property is * set. 
*/ - String getConfiguredValue() { - String envVar = System.getenv(this.getEnvironmentVariable()); - String sysProp = System.getProperty(this.getSystemProperty()); - - if (envVar != null && !envVar.isEmpty()) { - return envVar; - } else if (sysProp != null && !sysProp.isEmpty()) { - return sysProp; + String getConfiguredValue(ConfigProperties configProperties) { + String configuredValue = configProperties.getString(this.getSystemProperty()); + if (configuredValue != null && !configuredValue.isEmpty()) { + return configuredValue; } else { throw new ConfigurationException( String.format( @@ -94,11 +121,28 @@ String getConfiguredValue() { * @return The configured value for the option, obtained from the environment variable, system * property, or the fallback function, in that order of precedence. */ - String getConfiguredValueWithFallback(Supplier fallback) { + String getConfiguredValueWithFallback( + ConfigProperties configProperties, Supplier fallback) { try { - return this.getConfiguredValue(); + return this.getConfiguredValue(configProperties); } catch (ConfigurationException e) { return fallback.get(); } } + + /** + * Retrieves the value for this option, prioritizing environment variables before system + * properties. If neither an environment variable nor a system property is set for this option, + * then an empty {@link Optional} is returned. + * + * @return The configured value for the option, if set, obtained from the environment variable, + * system property, or empty {@link Optional}, in that order of precedence. 
+ */ + Optional getConfiguredValueAsOptional(ConfigProperties configProperties) { + try { + return Optional.of(this.getConfiguredValue(configProperties)); + } catch (ConfigurationException e) { + return Optional.empty(); + } + } } diff --git a/gcp-auth-extension/src/main/java/io/opentelemetry/contrib/gcp/auth/GcpAuthAutoConfigurationCustomizerProvider.java b/gcp-auth-extension/src/main/java/io/opentelemetry/contrib/gcp/auth/GcpAuthAutoConfigurationCustomizerProvider.java index 70e9bdd3b..12f73d5bb 100644 --- a/gcp-auth-extension/src/main/java/io/opentelemetry/contrib/gcp/auth/GcpAuthAutoConfigurationCustomizerProvider.java +++ b/gcp-auth-extension/src/main/java/io/opentelemetry/contrib/gcp/auth/GcpAuthAutoConfigurationCustomizerProvider.java @@ -5,23 +5,37 @@ package io.opentelemetry.contrib.gcp.auth; +import static io.opentelemetry.api.common.AttributeKey.stringKey; +import static java.util.Arrays.stream; +import static java.util.stream.Collectors.joining; +import static java.util.stream.Collectors.toMap; + import com.google.auth.oauth2.GoogleCredentials; import com.google.auto.service.AutoService; -import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.common.Attributes; import io.opentelemetry.contrib.gcp.auth.GoogleAuthException.Reason; +import io.opentelemetry.exporter.otlp.http.metrics.OtlpHttpMetricExporter; +import io.opentelemetry.exporter.otlp.http.metrics.OtlpHttpMetricExporterBuilder; import io.opentelemetry.exporter.otlp.http.trace.OtlpHttpSpanExporter; import io.opentelemetry.exporter.otlp.http.trace.OtlpHttpSpanExporterBuilder; +import io.opentelemetry.exporter.otlp.metrics.OtlpGrpcMetricExporter; +import io.opentelemetry.exporter.otlp.metrics.OtlpGrpcMetricExporterBuilder; import io.opentelemetry.exporter.otlp.trace.OtlpGrpcSpanExporter; import io.opentelemetry.exporter.otlp.trace.OtlpGrpcSpanExporterBuilder; import io.opentelemetry.sdk.autoconfigure.spi.AutoConfigurationCustomizer; import 
io.opentelemetry.sdk.autoconfigure.spi.AutoConfigurationCustomizerProvider; import io.opentelemetry.sdk.autoconfigure.spi.ConfigProperties; +import io.opentelemetry.sdk.metrics.export.MetricExporter; import io.opentelemetry.sdk.resources.Resource; import io.opentelemetry.sdk.trace.export.SpanExporter; import java.io.IOException; -import java.util.HashMap; +import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.annotation.Nonnull; /** * An AutoConfigurationCustomizerProvider for Google Cloud Platform (GCP) OpenTelemetry (OTLP) @@ -40,16 +54,41 @@ public class GcpAuthAutoConfigurationCustomizerProvider implements AutoConfigurationCustomizerProvider { - static final String QUOTA_USER_PROJECT_HEADER = "X-Goog-User-Project"; + private static final Logger logger = + Logger.getLogger(GcpAuthAutoConfigurationCustomizerProvider.class.getName()); + private static final String SIGNAL_TARGET_WARNING_FIX_SUGGESTION = + String.format( + "You may safely ignore this warning if it is intentional, otherwise please configure the '%s' by exporting valid values to environment variable: %s or by setting valid values in system property: %s.", + ConfigurableOption.GOOGLE_OTEL_AUTH_TARGET_SIGNALS.getUserReadableName(), + ConfigurableOption.GOOGLE_OTEL_AUTH_TARGET_SIGNALS.getEnvironmentVariable(), + ConfigurableOption.GOOGLE_OTEL_AUTH_TARGET_SIGNALS.getSystemProperty()); + + static final String QUOTA_USER_PROJECT_HEADER = "x-goog-user-project"; static final String GCP_USER_PROJECT_ID_KEY = "gcp.project_id"; + static final String SIGNAL_TYPE_TRACES = "traces"; + static final String SIGNAL_TYPE_METRICS = "metrics"; + static final String SIGNAL_TYPE_ALL = "all"; + /** - * Customizes the provided {@link AutoConfigurationCustomizer}. 
+ * Customizes the provided {@link AutoConfigurationCustomizer} such that authenticated exports to + * GCP Telemetry API are possible from the configured OTLP exporter. * *

This method attempts to retrieve Google Application Default Credentials (ADC) and performs - * the following: - Adds authorization headers to the configured {@link SpanExporter} based on the - * retrieved credentials. - Adds default properties for OTLP endpoint and resource attributes for - * GCP integration. + * the following: + * + *

    + *
  • Verifies whether the configured OTLP endpoint (base or signal specific) is a known GCP + * endpoint. + *
  • If the configured base OTLP endpoint is a known GCP Telemetry API endpoint, customizes + * both the configured OTLP {@link SpanExporter} and {@link MetricExporter}. + *
  • If the configured signal specific endpoint is a known GCP Telemetry API endpoint, + * customizes only the signal specific exporter. + *
+ * + * The 'customization' performed includes customizing the exporters by adding required headers to + * the export calls made and customizing the resource by adding required resource attributes to + * enable GCP integration. * * @param autoConfiguration the AutoConfigurationCustomizer to customize. * @throws GoogleAuthException if there's an error retrieving Google Application Default @@ -58,7 +97,7 @@ public class GcpAuthAutoConfigurationCustomizerProvider * not configured through environment variables or system properties. */ @Override - public void customize(AutoConfigurationCustomizer autoConfiguration) { + public void customize(@Nonnull AutoConfigurationCustomizer autoConfiguration) { GoogleCredentials credentials; try { credentials = GoogleCredentials.getApplicationDefault(); @@ -67,7 +106,11 @@ public void customize(AutoConfigurationCustomizer autoConfiguration) { } autoConfiguration .addSpanExporterCustomizer( - (exporter, configProperties) -> addAuthorizationHeaders(exporter, credentials)) + (spanExporter, configProperties) -> + customizeSpanExporter(spanExporter, credentials, configProperties)) + .addMetricExporterCustomizer( + (metricExporter, configProperties) -> + customizeMetricExporter(metricExporter, credentials, configProperties)) .addResourceCustomizer(GcpAuthAutoConfigurationCustomizerProvider::customizeResource); } @@ -76,47 +119,119 @@ public int order() { return Integer.MAX_VALUE - 1; } + private static SpanExporter customizeSpanExporter( + SpanExporter exporter, GoogleCredentials credentials, ConfigProperties configProperties) { + if (isSignalTargeted(SIGNAL_TYPE_TRACES, configProperties)) { + return addAuthorizationHeaders(exporter, credentials, configProperties); + } else { + String[] params = {SIGNAL_TYPE_TRACES, SIGNAL_TARGET_WARNING_FIX_SUGGESTION}; + logger.log( + Level.WARNING, + "GCP Authentication Extension is not configured for signal type: {0}. 
{1}", + params); + } + return exporter; + } + + private static MetricExporter customizeMetricExporter( + MetricExporter exporter, GoogleCredentials credentials, ConfigProperties configProperties) { + if (isSignalTargeted(SIGNAL_TYPE_METRICS, configProperties)) { + return addAuthorizationHeaders(exporter, credentials, configProperties); + } else { + String[] params = {SIGNAL_TYPE_METRICS, SIGNAL_TARGET_WARNING_FIX_SUGGESTION}; + logger.log( + Level.WARNING, + "GCP Authentication Extension is not configured for signal type: {0}. {1}", + params); + } + return exporter; + } + + // Checks if the auth extension is configured to target the passed signal for authentication. + private static boolean isSignalTargeted(String checkSignal, ConfigProperties configProperties) { + String userSpecifiedTargetedSignals = + ConfigurableOption.GOOGLE_OTEL_AUTH_TARGET_SIGNALS.getConfiguredValueWithFallback( + configProperties, () -> SIGNAL_TYPE_ALL); + return stream(userSpecifiedTargetedSignals.split(",")) + .map(String::trim) + .anyMatch( + targetedSignal -> + targetedSignal.equals(checkSignal) || targetedSignal.equals(SIGNAL_TYPE_ALL)); + } + // Adds authorization headers to the calls made by the OtlpGrpcSpanExporter and // OtlpHttpSpanExporter. 
private static SpanExporter addAuthorizationHeaders( - SpanExporter exporter, GoogleCredentials credentials) { + SpanExporter exporter, GoogleCredentials credentials, ConfigProperties configProperties) { if (exporter instanceof OtlpHttpSpanExporter) { OtlpHttpSpanExporterBuilder builder = ((OtlpHttpSpanExporter) exporter) - .toBuilder().setHeaders(() -> getRequiredHeaderMap(credentials)); + .toBuilder().setHeaders(() -> getRequiredHeaderMap(credentials, configProperties)); return builder.build(); } else if (exporter instanceof OtlpGrpcSpanExporter) { OtlpGrpcSpanExporterBuilder builder = ((OtlpGrpcSpanExporter) exporter) - .toBuilder().setHeaders(() -> getRequiredHeaderMap(credentials)); + .toBuilder().setHeaders(() -> getRequiredHeaderMap(credentials, configProperties)); + return builder.build(); + } + return exporter; + } + + // Adds authorization headers to the calls made by the OtlpGrpcMetricExporter and + // OtlpHttpMetricExporter. + private static MetricExporter addAuthorizationHeaders( + MetricExporter exporter, GoogleCredentials credentials, ConfigProperties configProperties) { + if (exporter instanceof OtlpHttpMetricExporter) { + OtlpHttpMetricExporterBuilder builder = + ((OtlpHttpMetricExporter) exporter) + .toBuilder().setHeaders(() -> getRequiredHeaderMap(credentials, configProperties)); + return builder.build(); + } else if (exporter instanceof OtlpGrpcMetricExporter) { + OtlpGrpcMetricExporterBuilder builder = + ((OtlpGrpcMetricExporter) exporter) + .toBuilder().setHeaders(() -> getRequiredHeaderMap(credentials, configProperties)); return builder.build(); } return exporter; } - private static Map getRequiredHeaderMap(GoogleCredentials credentials) { - Map gcpHeaders = new HashMap<>(); + private static Map getRequiredHeaderMap( + GoogleCredentials credentials, ConfigProperties configProperties) { + Map> gcpHeaders; try { - credentials.refreshIfExpired(); + // this also refreshes the credentials, if required + gcpHeaders = 
credentials.getRequestMetadata(); } catch (IOException e) { throw new GoogleAuthException(Reason.FAILED_ADC_REFRESH, e); } - gcpHeaders.put("Authorization", "Bearer " + credentials.getAccessToken().getTokenValue()); - String configuredQuotaProjectId = - ConfigurableOption.GOOGLE_CLOUD_QUOTA_PROJECT.getConfiguredValueWithFallback( - credentials::getQuotaProjectId); - if (configuredQuotaProjectId != null && !configuredQuotaProjectId.isEmpty()) { - gcpHeaders.put(QUOTA_USER_PROJECT_HEADER, configuredQuotaProjectId); + Map flattenedHeaders = + gcpHeaders.entrySet().stream() + .collect( + toMap( + Map.Entry::getKey, + entry -> + entry.getValue().stream() + .filter(Objects::nonNull) // Filter nulls + .filter(s -> !s.isEmpty()) // Filter empty strings + .collect(joining(",")))); + // Add quota user project header if not detected by the auth library and user provided it via + // system properties. + if (!flattenedHeaders.containsKey(QUOTA_USER_PROJECT_HEADER)) { + Optional maybeConfiguredQuotaProjectId = + ConfigurableOption.GOOGLE_CLOUD_QUOTA_PROJECT.getConfiguredValueAsOptional( + configProperties); + maybeConfiguredQuotaProjectId.ifPresent( + configuredQuotaProjectId -> + flattenedHeaders.put(QUOTA_USER_PROJECT_HEADER, configuredQuotaProjectId)); } - return gcpHeaders; + return flattenedHeaders; } // Updates the current resource with the attributes required for ingesting OTLP data on GCP. 
private static Resource customizeResource(Resource resource, ConfigProperties configProperties) { - String gcpProjectId = ConfigurableOption.GOOGLE_CLOUD_PROJECT.getConfiguredValue(); - Resource res = - Resource.create( - Attributes.of(AttributeKey.stringKey(GCP_USER_PROJECT_ID_KEY), gcpProjectId)); + String gcpProjectId = + ConfigurableOption.GOOGLE_CLOUD_PROJECT.getConfiguredValue(configProperties); + Resource res = Resource.create(Attributes.of(stringKey(GCP_USER_PROJECT_ID_KEY), gcpProjectId)); return resource.merge(res); } } diff --git a/gcp-auth-extension/src/test/java/io/opentelemetry/contrib/gcp/auth/GcpAuthAutoConfigurationCustomizerProviderTest.java b/gcp-auth-extension/src/test/java/io/opentelemetry/contrib/gcp/auth/GcpAuthAutoConfigurationCustomizerProviderTest.java index 39f626e61..d7a4f07fd 100644 --- a/gcp-auth-extension/src/test/java/io/opentelemetry/contrib/gcp/auth/GcpAuthAutoConfigurationCustomizerProviderTest.java +++ b/gcp-auth-extension/src/test/java/io/opentelemetry/contrib/gcp/auth/GcpAuthAutoConfigurationCustomizerProviderTest.java @@ -7,34 +7,51 @@ import static io.opentelemetry.contrib.gcp.auth.GcpAuthAutoConfigurationCustomizerProvider.GCP_USER_PROJECT_ID_KEY; import static io.opentelemetry.contrib.gcp.auth.GcpAuthAutoConfigurationCustomizerProvider.QUOTA_USER_PROJECT_HEADER; +import static io.opentelemetry.contrib.gcp.auth.GcpAuthAutoConfigurationCustomizerProvider.SIGNAL_TYPE_ALL; +import static io.opentelemetry.contrib.gcp.auth.GcpAuthAutoConfigurationCustomizerProvider.SIGNAL_TYPE_METRICS; +import static io.opentelemetry.contrib.gcp.auth.GcpAuthAutoConfigurationCustomizerProvider.SIGNAL_TYPE_TRACES; import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static 
org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import com.google.auth.oauth2.AccessToken; import com.google.auth.oauth2.GoogleCredentials; import com.google.auto.value.AutoValue; import com.google.common.collect.ImmutableMap; import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.LongCounter; import io.opentelemetry.api.trace.Span; +import io.opentelemetry.common.ComponentLoader; import io.opentelemetry.context.Scope; +import io.opentelemetry.exporter.otlp.http.metrics.OtlpHttpMetricExporter; +import io.opentelemetry.exporter.otlp.http.metrics.OtlpHttpMetricExporterBuilder; import io.opentelemetry.exporter.otlp.http.trace.OtlpHttpSpanExporter; import io.opentelemetry.exporter.otlp.http.trace.OtlpHttpSpanExporterBuilder; +import io.opentelemetry.exporter.otlp.metrics.OtlpGrpcMetricExporter; +import io.opentelemetry.exporter.otlp.metrics.OtlpGrpcMetricExporterBuilder; import io.opentelemetry.exporter.otlp.trace.OtlpGrpcSpanExporter; import io.opentelemetry.exporter.otlp.trace.OtlpGrpcSpanExporterBuilder; import io.opentelemetry.sdk.OpenTelemetrySdk; import io.opentelemetry.sdk.autoconfigure.AutoConfiguredOpenTelemetrySdk; import io.opentelemetry.sdk.autoconfigure.AutoConfiguredOpenTelemetrySdkBuilder; -import io.opentelemetry.sdk.autoconfigure.internal.AutoConfigureUtil; -import io.opentelemetry.sdk.autoconfigure.internal.ComponentLoader; import io.opentelemetry.sdk.autoconfigure.internal.SpiHelper; import io.opentelemetry.sdk.autoconfigure.spi.ConfigProperties; import io.opentelemetry.sdk.autoconfigure.spi.ConfigurationException; +import io.opentelemetry.sdk.autoconfigure.spi.metrics.ConfigurableMetricExporterProvider; import io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider; import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.common.export.MemoryMode; 
+import io.opentelemetry.sdk.metrics.Aggregation; +import io.opentelemetry.sdk.metrics.InstrumentType; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.export.MetricExporter; import io.opentelemetry.sdk.trace.data.SpanData; import io.opentelemetry.sdk.trace.export.SpanExporter; +import java.io.IOException; import java.time.Duration; import java.time.Instant; import java.util.AbstractMap.SimpleEntry; @@ -44,10 +61,12 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.Random; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; import java.util.stream.Stream; +import javax.annotation.Nonnull; import javax.annotation.Nullable; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -62,19 +81,24 @@ import org.mockito.Mockito; import org.mockito.MockitoAnnotations; import org.mockito.junit.jupiter.MockitoExtension; +import org.mockito.stubbing.Answer; @ExtendWith(MockitoExtension.class) class GcpAuthAutoConfigurationCustomizerProviderTest { private static final String DUMMY_GCP_RESOURCE_PROJECT_ID = "my-gcp-resource-project-id"; private static final String DUMMY_GCP_QUOTA_PROJECT_ID = "my-gcp-quota-project-id"; + private static final Random TEST_RANDOM = new Random(); @Mock private GoogleCredentials mockedGoogleCredentials; - @Captor private ArgumentCaptor>> headerSupplierCaptor; + @Captor private ArgumentCaptor>> traceHeaderSupplierCaptor; + @Captor private ArgumentCaptor>> metricHeaderSupplierCaptor; - private static final ImmutableMap otelProperties = + private static final ImmutableMap defaultOtelPropertiesSpanExporter = ImmutableMap.of( + "otel.exporter.otlp.traces.endpoint", + "https://telemetry.googleapis.com/v1/traces", "otel.traces.exporter", "otlp", "otel.metrics.exporter", @@ -84,27 +108,43 @@ class GcpAuthAutoConfigurationCustomizerProviderTest 
{ "otel.resource.attributes", "foo=bar"); + private static final ImmutableMap defaultOtelPropertiesMetricExporter = + ImmutableMap.of( + "otel.exporter.otlp.metrics.endpoint", + "https://telemetry.googleapis.com/v1/metrics", + "otel.traces.exporter", + "none", + "otel.metrics.exporter", + "otlp", + "otel.logs.exporter", + "none", + "otel.resource.attributes", + "foo=bar"); + @BeforeEach public void setup() { MockitoAnnotations.openMocks(this); } + // TODO: Use parameterized test for testing traces customizer for http & grpc. @Test - public void testCustomizerOtlpHttp() { + void testTraceCustomizerOtlpHttp() { // Set resource project system property System.setProperty( ConfigurableOption.GOOGLE_CLOUD_PROJECT.getSystemProperty(), DUMMY_GCP_RESOURCE_PROJECT_ID); + System.setProperty( + ConfigurableOption.GOOGLE_OTEL_AUTH_TARGET_SIGNALS.getSystemProperty(), SIGNAL_TYPE_TRACES); // Prepare mocks prepareMockBehaviorForGoogleCredentials(); - OtlpHttpSpanExporter mockOtlpHttpSpanExporter = Mockito.mock(OtlpHttpSpanExporter.class); + OtlpHttpSpanExporter mockOtlpHttpSpanExporter = mock(OtlpHttpSpanExporter.class); OtlpHttpSpanExporterBuilder otlpSpanExporterBuilder = OtlpHttpSpanExporter.builder(); OtlpHttpSpanExporterBuilder spyOtlpHttpSpanExporterBuilder = Mockito.spy(otlpSpanExporterBuilder); - Mockito.when(spyOtlpHttpSpanExporterBuilder.build()).thenReturn(mockOtlpHttpSpanExporter); + when(spyOtlpHttpSpanExporterBuilder.build()).thenReturn(mockOtlpHttpSpanExporter); - Mockito.when(mockOtlpHttpSpanExporter.shutdown()).thenReturn(CompletableResultCode.ofSuccess()); + when(mockOtlpHttpSpanExporter.shutdown()).thenReturn(CompletableResultCode.ofSuccess()); List exportedSpans = new ArrayList<>(); - Mockito.when(mockOtlpHttpSpanExporter.export(Mockito.anyCollection())) + when(mockOtlpHttpSpanExporter.export(any())) .thenAnswer( invocationOnMock -> { exportedSpans.addAll(invocationOnMock.getArgument(0)); @@ -112,6 +152,7 @@ public void testCustomizerOtlpHttp() { }); 
Mockito.when(mockOtlpHttpSpanExporter.toBuilder()).thenReturn(spyOtlpHttpSpanExporterBuilder); + // begin assertions try (MockedStatic googleCredentialsMockedStatic = Mockito.mockStatic(GoogleCredentials.class)) { googleCredentialsMockedStatic @@ -122,13 +163,14 @@ public void testCustomizerOtlpHttp() { generateTestSpan(sdk); CompletableResultCode code = sdk.shutdown(); CompletableResultCode joinResult = code.join(10, TimeUnit.SECONDS); - assertTrue(joinResult.isSuccess()); + assertThat(joinResult.isSuccess()).isTrue(); Mockito.verify(mockOtlpHttpSpanExporter, Mockito.times(1)).toBuilder(); Mockito.verify(spyOtlpHttpSpanExporterBuilder, Mockito.times(1)) - .setHeaders(headerSupplierCaptor.capture()); - assertEquals(2, headerSupplierCaptor.getValue().get().size()); - assertThat(authHeadersQuotaProjectIsPresent(headerSupplierCaptor.getValue().get())).isTrue(); + .setHeaders(traceHeaderSupplierCaptor.capture()); + assertThat(traceHeaderSupplierCaptor.getValue().get().size()).isEqualTo(2); + assertThat(authHeadersQuotaProjectIsPresent(traceHeaderSupplierCaptor.getValue().get())) + .isTrue(); Mockito.verify(mockOtlpHttpSpanExporter, Mockito.atLeast(1)).export(Mockito.anyCollection()); @@ -148,19 +190,22 @@ public void testCustomizerOtlpHttp() { } @Test - public void testCustomizerOtlpGrpc() { + void testTraceCustomizerOtlpGrpc() { // Set resource project system property System.setProperty( ConfigurableOption.GOOGLE_CLOUD_PROJECT.getSystemProperty(), DUMMY_GCP_RESOURCE_PROJECT_ID); + System.setProperty( + ConfigurableOption.GOOGLE_OTEL_AUTH_TARGET_SIGNALS.getSystemProperty(), SIGNAL_TYPE_TRACES); // Prepare mocks prepareMockBehaviorForGoogleCredentials(); OtlpGrpcSpanExporter mockOtlpGrpcSpanExporter = Mockito.mock(OtlpGrpcSpanExporter.class); OtlpGrpcSpanExporterBuilder spyOtlpGrpcSpanExporterBuilder = Mockito.spy(OtlpGrpcSpanExporter.builder()); List exportedSpans = new ArrayList<>(); - configureGrpcMockExporters( + configureGrpcMockSpanExporter( 
mockOtlpGrpcSpanExporter, spyOtlpGrpcSpanExporterBuilder, exportedSpans); + // begin assertions try (MockedStatic googleCredentialsMockedStatic = Mockito.mockStatic(GoogleCredentials.class)) { googleCredentialsMockedStatic @@ -171,13 +216,14 @@ public void testCustomizerOtlpGrpc() { generateTestSpan(sdk); CompletableResultCode code = sdk.shutdown(); CompletableResultCode joinResult = code.join(10, TimeUnit.SECONDS); - assertTrue(joinResult.isSuccess()); + assertThat(joinResult.isSuccess()).isTrue(); Mockito.verify(mockOtlpGrpcSpanExporter, Mockito.times(1)).toBuilder(); Mockito.verify(spyOtlpGrpcSpanExporterBuilder, Mockito.times(1)) - .setHeaders(headerSupplierCaptor.capture()); - assertEquals(2, headerSupplierCaptor.getValue().get().size()); - assertThat(authHeadersQuotaProjectIsPresent(headerSupplierCaptor.getValue().get())).isTrue(); + .setHeaders(traceHeaderSupplierCaptor.capture()); + assertThat(traceHeaderSupplierCaptor.getValue().get().size()).isEqualTo(2); + assertThat(authHeadersQuotaProjectIsPresent(traceHeaderSupplierCaptor.getValue().get())) + .isTrue(); Mockito.verify(mockOtlpGrpcSpanExporter, Mockito.atLeast(1)).export(Mockito.anyCollection()); @@ -196,8 +242,133 @@ public void testCustomizerOtlpGrpc() { } } + // TODO: Use parameterized test for testing metrics customizer for http & grpc. 
+ @Test + void testMetricCustomizerOtlpHttp() { + // Set resource project system property + System.setProperty( + ConfigurableOption.GOOGLE_CLOUD_PROJECT.getSystemProperty(), DUMMY_GCP_RESOURCE_PROJECT_ID); + System.setProperty( + ConfigurableOption.GOOGLE_OTEL_AUTH_TARGET_SIGNALS.getSystemProperty(), + SIGNAL_TYPE_METRICS); + // Prepare mocks + prepareMockBehaviorForGoogleCredentials(); + OtlpHttpMetricExporter mockOtlpHttpMetricExporter = Mockito.mock(OtlpHttpMetricExporter.class); + OtlpHttpMetricExporterBuilder otlpMetricExporterBuilder = OtlpHttpMetricExporter.builder(); + OtlpHttpMetricExporterBuilder spyOtlpHttpMetricExporterBuilder = + Mockito.spy(otlpMetricExporterBuilder); + List exportedMetrics = new ArrayList<>(); + configureHttpMockMetricExporter( + mockOtlpHttpMetricExporter, spyOtlpHttpMetricExporterBuilder, exportedMetrics); + + // begin assertions + try (MockedStatic googleCredentialsMockedStatic = + Mockito.mockStatic(GoogleCredentials.class)) { + googleCredentialsMockedStatic + .when(GoogleCredentials::getApplicationDefault) + .thenReturn(mockedGoogleCredentials); + + OpenTelemetrySdk sdk = buildOpenTelemetrySdkWithExporter(mockOtlpHttpMetricExporter); + generateTestMetric(sdk); + CompletableResultCode code = sdk.shutdown(); + CompletableResultCode joinResult = code.join(10, TimeUnit.SECONDS); + assertThat(joinResult.isSuccess()).isTrue(); + + Mockito.verify(mockOtlpHttpMetricExporter, Mockito.times(1)).toBuilder(); + Mockito.verify(spyOtlpHttpMetricExporterBuilder, Mockito.times(1)) + .setHeaders(metricHeaderSupplierCaptor.capture()); + assertThat(metricHeaderSupplierCaptor.getValue().get().size()).isEqualTo(2); + assertThat(authHeadersQuotaProjectIsPresent(metricHeaderSupplierCaptor.getValue().get())) + .isTrue(); + + Mockito.verify(mockOtlpHttpMetricExporter, Mockito.atLeast(1)) + .export(Mockito.anyCollection()); + + assertThat(exportedMetrics) + .hasSizeGreaterThan(0) + .allSatisfy( + metricData -> { + 
assertThat(metricData.getResource().getAttributes().asMap()) + .containsEntry( + AttributeKey.stringKey(GCP_USER_PROJECT_ID_KEY), + DUMMY_GCP_RESOURCE_PROJECT_ID) + .containsEntry(AttributeKey.stringKey("foo"), "bar"); + assertThat(metricData.getLongSumData().getPoints()) + .hasSizeGreaterThan(0) + .allSatisfy( + longPointData -> { + assertThat(longPointData.getAttributes().asMap()) + .containsKey(AttributeKey.longKey("work_loop")); + }); + }); + } + } + @Test - public void testCustomizerFailWithMissingResourceProject() { + void testMetricCustomizerOtlpGrpc() { + // Set resource project system property + System.setProperty( + ConfigurableOption.GOOGLE_CLOUD_PROJECT.getSystemProperty(), DUMMY_GCP_RESOURCE_PROJECT_ID); + System.setProperty( + ConfigurableOption.GOOGLE_OTEL_AUTH_TARGET_SIGNALS.getSystemProperty(), + SIGNAL_TYPE_METRICS); + // Prepare mocks + prepareMockBehaviorForGoogleCredentials(); + OtlpGrpcMetricExporter mockOtlpGrpcMetricExporter = Mockito.mock(OtlpGrpcMetricExporter.class); + OtlpGrpcMetricExporterBuilder otlpMetricExporterBuilder = OtlpGrpcMetricExporter.builder(); + OtlpGrpcMetricExporterBuilder spyOtlpGrpcMetricExporterBuilder = + Mockito.spy(otlpMetricExporterBuilder); + List exportedMetrics = new ArrayList<>(); + configureGrpcMockMetricExporter( + mockOtlpGrpcMetricExporter, spyOtlpGrpcMetricExporterBuilder, exportedMetrics); + + // begin assertions + try (MockedStatic googleCredentialsMockedStatic = + Mockito.mockStatic(GoogleCredentials.class)) { + googleCredentialsMockedStatic + .when(GoogleCredentials::getApplicationDefault) + .thenReturn(mockedGoogleCredentials); + + OpenTelemetrySdk sdk = buildOpenTelemetrySdkWithExporter(mockOtlpGrpcMetricExporter); + generateTestMetric(sdk); + CompletableResultCode code = sdk.shutdown(); + CompletableResultCode joinResult = code.join(10, TimeUnit.SECONDS); + assertThat(joinResult.isSuccess()).isTrue(); + + Mockito.verify(mockOtlpGrpcMetricExporter, Mockito.times(1)).toBuilder(); + 
Mockito.verify(spyOtlpGrpcMetricExporterBuilder, Mockito.times(1)) + .setHeaders(metricHeaderSupplierCaptor.capture()); + assertThat(metricHeaderSupplierCaptor.getValue().get().size()).isEqualTo(2); + assertThat(authHeadersQuotaProjectIsPresent(metricHeaderSupplierCaptor.getValue().get())) + .isTrue(); + + Mockito.verify(mockOtlpGrpcMetricExporter, Mockito.atLeast(1)) + .export(Mockito.anyCollection()); + + assertThat(exportedMetrics) + .hasSizeGreaterThan(0) + .allSatisfy( + metricData -> { + assertThat(metricData.getResource().getAttributes().asMap()) + .containsEntry( + AttributeKey.stringKey(GCP_USER_PROJECT_ID_KEY), + DUMMY_GCP_RESOURCE_PROJECT_ID) + .containsEntry(AttributeKey.stringKey("foo"), "bar"); + assertThat(metricData.getLongSumData().getPoints()) + .hasSizeGreaterThan(0) + .allSatisfy( + longPointData -> { + assertThat(longPointData.getAttributes().asMap()) + .containsKey(AttributeKey.longKey("work_loop")); + }); + }); + } + } + + @Test + void testCustomizerFailWithMissingResourceProject() { + System.setProperty( + ConfigurableOption.GOOGLE_OTEL_AUTH_TARGET_SIGNALS.getSystemProperty(), SIGNAL_TYPE_ALL); OtlpGrpcSpanExporter mockOtlpGrpcSpanExporter = Mockito.mock(OtlpGrpcSpanExporter.class); try (MockedStatic googleCredentialsMockedStatic = Mockito.mockStatic(GoogleCredentials.class)) { @@ -205,35 +376,43 @@ public void testCustomizerFailWithMissingResourceProject() { .when(GoogleCredentials::getApplicationDefault) .thenReturn(mockedGoogleCredentials); - assertThrows( - ConfigurationException.class, - () -> buildOpenTelemetrySdkWithExporter(mockOtlpGrpcSpanExporter)); + assertThatThrownBy(() -> buildOpenTelemetrySdkWithExporter(mockOtlpGrpcSpanExporter)) + .isInstanceOf(ConfigurationException.class); } } @ParameterizedTest @MethodSource("provideQuotaBehaviorTestCases") @SuppressWarnings("CannotMockMethod") - public void testQuotaProjectBehavior(QuotaProjectIdTestBehavior testCase) { + void testQuotaProjectBehavior(QuotaProjectIdTestBehavior testCase) 
throws IOException { // Set resource project system property System.setProperty( ConfigurableOption.GOOGLE_CLOUD_PROJECT.getSystemProperty(), DUMMY_GCP_RESOURCE_PROJECT_ID); - // Configure mock credentials to return fake access token - Mockito.when(mockedGoogleCredentials.getAccessToken()) - .thenReturn(new AccessToken("fake", Date.from(Instant.now()))); - - // To prevent unncecessary stubbings, mock getQuotaProjectId only when necessary - if (testCase.getUserSpecifiedQuotaProjectId() == null - || testCase.getUserSpecifiedQuotaProjectId().isEmpty()) { - String quotaProjectFromCredential = - testCase.getIsQuotaProjectPresentInCredentials() ? DUMMY_GCP_QUOTA_PROJECT_ID : null; - Mockito.when(mockedGoogleCredentials.getQuotaProjectId()) - .thenReturn(quotaProjectFromCredential); + System.setProperty( + ConfigurableOption.GOOGLE_OTEL_AUTH_TARGET_SIGNALS.getSystemProperty(), SIGNAL_TYPE_ALL); + + // Prepare request metadata + AccessToken fakeAccessToken = new AccessToken("fake", Date.from(Instant.now())); + ImmutableMap> mockedRequestMetadata; + if (testCase.getIsQuotaProjectPresentInMetadata()) { + mockedRequestMetadata = + ImmutableMap.of( + "Authorization", + Collections.singletonList("Bearer " + fakeAccessToken.getTokenValue()), + QUOTA_USER_PROJECT_HEADER, + Collections.singletonList(DUMMY_GCP_QUOTA_PROJECT_ID)); + } else { + mockedRequestMetadata = + ImmutableMap.of( + "Authorization", + Collections.singletonList("Bearer " + fakeAccessToken.getTokenValue())); } + // mock credentials to return the prepared request metadata + Mockito.when(mockedGoogleCredentials.getRequestMetadata()).thenReturn(mockedRequestMetadata); // configure environment according to test case String quotaProjectId = testCase.getUserSpecifiedQuotaProjectId(); // maybe empty string - if (testCase.getUserSpecifiedQuotaProjectId() != null) { + if (quotaProjectId != null) { // user specified a quota project id System.setProperty( ConfigurableOption.GOOGLE_CLOUD_QUOTA_PROJECT.getSystemProperty(), 
quotaProjectId); @@ -244,7 +423,7 @@ public void testQuotaProjectBehavior(QuotaProjectIdTestBehavior testCase) { OtlpGrpcSpanExporterBuilder spyOtlpGrpcSpanExporterBuilder = Mockito.spy(OtlpGrpcSpanExporter.builder()); List exportedSpans = new ArrayList<>(); - configureGrpcMockExporters( + configureGrpcMockSpanExporter( mockOtlpGrpcSpanExporter, spyOtlpGrpcSpanExporterBuilder, exportedSpans); try (MockedStatic googleCredentialsMockedStatic = @@ -258,12 +437,12 @@ public void testQuotaProjectBehavior(QuotaProjectIdTestBehavior testCase) { generateTestSpan(sdk); CompletableResultCode code = sdk.shutdown(); CompletableResultCode joinResult = code.join(10, TimeUnit.SECONDS); - assertTrue(joinResult.isSuccess()); + assertThat(joinResult.isSuccess()).isTrue(); Mockito.verify(spyOtlpGrpcSpanExporterBuilder, Mockito.times(1)) - .setHeaders(headerSupplierCaptor.capture()); + .setHeaders(traceHeaderSupplierCaptor.capture()); // assert that the Authorization bearer token header is present - Map exportHeaders = headerSupplierCaptor.getValue().get(); + Map exportHeaders = traceHeaderSupplierCaptor.getValue().get(); assertThat(exportHeaders).containsEntry("Authorization", "Bearer fake"); if (testCase.getExpectedQuotaProjectInHeader() == null) { @@ -277,6 +456,215 @@ public void testQuotaProjectBehavior(QuotaProjectIdTestBehavior testCase) { } } + @ParameterizedTest + @MethodSource("provideTargetSignalBehaviorTestCases") + void testTargetSignalsBehavior(TargetSignalBehavior testCase) { + // Set resource project system property + System.setProperty( + ConfigurableOption.GOOGLE_CLOUD_PROJECT.getSystemProperty(), DUMMY_GCP_RESOURCE_PROJECT_ID); + // Prepare mocks + // Prepare mocked credential + prepareMockBehaviorForGoogleCredentials(); + + // Prepare mocked span exporter + OtlpGrpcSpanExporter mockOtlpGrpcSpanExporter = Mockito.mock(OtlpGrpcSpanExporter.class); + OtlpGrpcSpanExporterBuilder spyOtlpGrpcSpanExporterBuilder = + Mockito.spy(OtlpGrpcSpanExporter.builder()); + List 
exportedSpans = new ArrayList<>(); + configureGrpcMockSpanExporter( + mockOtlpGrpcSpanExporter, spyOtlpGrpcSpanExporterBuilder, exportedSpans); + configureGrpcMockSpanExporter( + mockOtlpGrpcSpanExporter, spyOtlpGrpcSpanExporterBuilder, exportedSpans); + + // Prepare mocked metrics exporter + OtlpGrpcMetricExporter mockOtlpGrpcMetricExporter = Mockito.mock(OtlpGrpcMetricExporter.class); + OtlpGrpcMetricExporterBuilder otlpMetricExporterBuilder = OtlpGrpcMetricExporter.builder(); + OtlpGrpcMetricExporterBuilder spyOtlpGrpcMetricExporterBuilder = + Mockito.spy(otlpMetricExporterBuilder); + List exportedMetrics = new ArrayList<>(); + configureGrpcMockMetricExporter( + mockOtlpGrpcMetricExporter, spyOtlpGrpcMetricExporterBuilder, exportedMetrics); + + // configure environment according to test case + System.setProperty( + ConfigurableOption.GOOGLE_OTEL_AUTH_TARGET_SIGNALS.getSystemProperty(), + testCase.getConfiguredTargetSignals()); + + // Build Autoconfigured OpenTelemetry SDK using the mocks and send signals + try (MockedStatic googleCredentialsMockedStatic = + Mockito.mockStatic(GoogleCredentials.class)) { + googleCredentialsMockedStatic + .when(GoogleCredentials::getApplicationDefault) + .thenReturn(mockedGoogleCredentials); + + OpenTelemetrySdk sdk = + buildOpenTelemetrySdkWithExporter( + mockOtlpGrpcSpanExporter, + mockOtlpGrpcMetricExporter, + testCase.getUserSpecifiedOtelProperties()); + generateTestMetric(sdk); + generateTestSpan(sdk); + CompletableResultCode code = sdk.shutdown(); + CompletableResultCode joinResult = code.join(10, TimeUnit.SECONDS); + assertThat(joinResult.isSuccess()).isTrue(); + + // Check Traces modification conditions + if (testCase.getExpectedIsTraceSignalModified()) { + // If traces signal is expected to be modified, auth headers must be present + Mockito.verify(spyOtlpGrpcSpanExporterBuilder, Mockito.times(1)) + .setHeaders(traceHeaderSupplierCaptor.capture()); + 
assertThat(traceHeaderSupplierCaptor.getValue().get().size()).isEqualTo(2); + assertThat(authHeadersQuotaProjectIsPresent(traceHeaderSupplierCaptor.getValue().get())) + .isTrue(); + } else { + // If traces signals is not expected to be modified then no interaction with the builder + // should be made + Mockito.verifyNoInteractions(spyOtlpGrpcSpanExporterBuilder); + } + + // Check Metric modification conditions + if (testCase.getExpectedIsMetricsSignalModified()) { + // If metrics signal is expected to be modified, auth headers must be present + Mockito.verify(spyOtlpGrpcMetricExporterBuilder, Mockito.times(1)) + .setHeaders(metricHeaderSupplierCaptor.capture()); + assertThat(metricHeaderSupplierCaptor.getValue().get().size()).isEqualTo(2); + assertThat(authHeadersQuotaProjectIsPresent(metricHeaderSupplierCaptor.getValue().get())) + .isTrue(); + } else { + // If metrics signals is not expected to be modified then no interaction with the builder + // should be made + Mockito.verifyNoInteractions(spyOtlpGrpcMetricExporterBuilder); + } + } + } + + /** Test cases specifying expected behavior for GOOGLE_OTEL_AUTH_TARGET_SIGNALS */ + private static Stream provideTargetSignalBehaviorTestCases() { + return Stream.of( + Arguments.of( + TargetSignalBehavior.builder() + .setConfiguredTargetSignals("traces") + .setUserSpecifiedOtelProperties(defaultOtelPropertiesSpanExporter) + .setExpectedIsMetricsSignalModified(false) + .setExpectedIsTraceSignalModified(true) + .build()), + Arguments.of( + TargetSignalBehavior.builder() + .setConfiguredTargetSignals("metrics") + .setUserSpecifiedOtelProperties(defaultOtelPropertiesMetricExporter) + .setExpectedIsMetricsSignalModified(true) + .setExpectedIsTraceSignalModified(false) + .build()), + Arguments.of( + TargetSignalBehavior.builder() + .setConfiguredTargetSignals("all") + .setUserSpecifiedOtelProperties( + ImmutableMap.of( + "otel.exporter.otlp.metrics.endpoint", + "https://localhost:4813/v1/metrics", + 
"otel.exporter.otlp.traces.endpoint", + "https://localhost:4813/v1/traces", + "otel.traces.exporter", + "otlp", + "otel.metrics.exporter", + "otlp", + "otel.logs.exporter", + "none")) + .setExpectedIsMetricsSignalModified(true) + .setExpectedIsTraceSignalModified(true) + .build()), + Arguments.of( + TargetSignalBehavior.builder() + .setConfiguredTargetSignals("metrics, traces") + .setUserSpecifiedOtelProperties( + ImmutableMap.of( + "otel.exporter.otlp.metrics.endpoint", + "https://localhost:4813/v1/metrics", + "otel.exporter.otlp.traces.endpoint", + "https://localhost:4813/v1/traces", + "otel.traces.exporter", + "otlp", + "otel.metrics.exporter", + "otlp", + "otel.logs.exporter", + "none")) + .setExpectedIsMetricsSignalModified(true) + .setExpectedIsTraceSignalModified(true) + .build()), + Arguments.of( + TargetSignalBehavior.builder() + .setConfiguredTargetSignals("") + .setUserSpecifiedOtelProperties( + ImmutableMap.of( + "otel.exporter.otlp.metrics.endpoint", + "https://localhost:4813/v1/metrics", + "otel.exporter.otlp.traces.endpoint", + "https://localhost:4813/v1/traces", + "otel.traces.exporter", + "otlp", + "otel.metrics.exporter", + "otlp", + "otel.logs.exporter", + "none")) + .setExpectedIsMetricsSignalModified(true) + .setExpectedIsTraceSignalModified(true) + .build()), + Arguments.of( + TargetSignalBehavior.builder() + .setConfiguredTargetSignals("all") + .setUserSpecifiedOtelProperties( + ImmutableMap.of( + "otel.exporter.otlp.metrics.endpoint", + "https://localhost:4813/v1/metrics", + "otel.exporter.otlp.traces.endpoint", + "https://localhost:4813/v1/traces", + "otel.traces.exporter", + "none", + "otel.metrics.exporter", + "none", + "otel.logs.exporter", + "none")) + .setExpectedIsMetricsSignalModified(false) + .setExpectedIsTraceSignalModified(false) + .build()), + Arguments.of( + TargetSignalBehavior.builder() + .setConfiguredTargetSignals("metric, trace") + .setUserSpecifiedOtelProperties( + ImmutableMap.of( + "otel.exporter.otlp.metrics.endpoint", 
+ "https://localhost:4813/v1/metrics", + "otel.exporter.otlp.traces.endpoint", + "https://localhost:4813/v1/traces", + "otel.traces.exporter", + "otlp", + "otel.metrics.exporter", + "otlp", + "otel.logs.exporter", + "none")) + .setExpectedIsMetricsSignalModified(false) + .setExpectedIsTraceSignalModified(false) + .build()), + Arguments.of( + TargetSignalBehavior.builder() + .setConfiguredTargetSignals("metrics, trace") + .setUserSpecifiedOtelProperties( + ImmutableMap.of( + "otel.exporter.otlp.metrics.endpoint", + "https://localhost:4813/v1/metrics", + "otel.exporter.otlp.traces.endpoint", + "https://localhost:4813/v1/traces", + "otel.traces.exporter", + "otlp", + "otel.metrics.exporter", + "otlp", + "otel.logs.exporter", + "none")) + .setExpectedIsMetricsSignalModified(true) + .setExpectedIsTraceSignalModified(false) + .build())); + } + /** * Test cases specifying expected value for the user quota project header given the user input and * the current credentials state. @@ -288,72 +676,80 @@ public void testQuotaProjectBehavior(QuotaProjectIdTestBehavior testCase) { * indicates the expectation that the QUOTA_USER_PROJECT_HEADER should not be present in the * export headers. * - *

{@code true} for {@link QuotaProjectIdTestBehavior#getIsQuotaProjectPresentInCredentials()} + *

{@code true} for {@link QuotaProjectIdTestBehavior#getIsQuotaProjectPresentInMetadata()} * indicates that the mocked credentials are configured to provide DUMMY_GCP_QUOTA_PROJECT_ID as * the quota project ID. */ private static Stream provideQuotaBehaviorTestCases() { return Stream.of( + // If quota project present in metadata, it will be used Arguments.of( QuotaProjectIdTestBehavior.builder() .setUserSpecifiedQuotaProjectId(DUMMY_GCP_QUOTA_PROJECT_ID) - .setIsQuotaProjectPresentInCredentials(true) + .setIsQuotaProjectPresentInMetadata(true) .setExpectedQuotaProjectInHeader(DUMMY_GCP_QUOTA_PROJECT_ID) .build()), Arguments.of( QuotaProjectIdTestBehavior.builder() - .setUserSpecifiedQuotaProjectId(DUMMY_GCP_QUOTA_PROJECT_ID) - .setIsQuotaProjectPresentInCredentials(false) + .setUserSpecifiedQuotaProjectId("my-custom-quota-project-id") + .setIsQuotaProjectPresentInMetadata(true) .setExpectedQuotaProjectInHeader(DUMMY_GCP_QUOTA_PROJECT_ID) .build()), + // If quota project not present in request metadata, then user specified project is used Arguments.of( QuotaProjectIdTestBehavior.builder() - .setUserSpecifiedQuotaProjectId("my-custom-quota-project-id") - .setIsQuotaProjectPresentInCredentials(true) - .setExpectedQuotaProjectInHeader("my-custom-quota-project-id") + .setUserSpecifiedQuotaProjectId(DUMMY_GCP_QUOTA_PROJECT_ID) + .setIsQuotaProjectPresentInMetadata(false) + .setExpectedQuotaProjectInHeader(DUMMY_GCP_QUOTA_PROJECT_ID) .build()), Arguments.of( QuotaProjectIdTestBehavior.builder() .setUserSpecifiedQuotaProjectId("my-custom-quota-project-id") - .setIsQuotaProjectPresentInCredentials(false) + .setIsQuotaProjectPresentInMetadata(false) .setExpectedQuotaProjectInHeader("my-custom-quota-project-id") .build()), + // Testing for special edge case inputs + // user-specified quota project is empty Arguments.of( QuotaProjectIdTestBehavior.builder() .setUserSpecifiedQuotaProjectId("") // user explicitly specifies empty - .setIsQuotaProjectPresentInCredentials(true) + 
.setIsQuotaProjectPresentInMetadata(true) .setExpectedQuotaProjectInHeader(DUMMY_GCP_QUOTA_PROJECT_ID) .build()), Arguments.of( QuotaProjectIdTestBehavior.builder() - .setUserSpecifiedQuotaProjectId(null) // user omits specifying quota project - .setIsQuotaProjectPresentInCredentials(true) - .setExpectedQuotaProjectInHeader(DUMMY_GCP_QUOTA_PROJECT_ID) + .setUserSpecifiedQuotaProjectId("") + .setIsQuotaProjectPresentInMetadata(false) + .setExpectedQuotaProjectInHeader(null) .build()), Arguments.of( QuotaProjectIdTestBehavior.builder() - .setUserSpecifiedQuotaProjectId("") - .setIsQuotaProjectPresentInCredentials(false) - .setExpectedQuotaProjectInHeader(null) + .setUserSpecifiedQuotaProjectId(null) // user omits specifying quota project + .setIsQuotaProjectPresentInMetadata(true) + .setExpectedQuotaProjectInHeader(DUMMY_GCP_QUOTA_PROJECT_ID) .build()), Arguments.of( QuotaProjectIdTestBehavior.builder() .setUserSpecifiedQuotaProjectId(null) - .setIsQuotaProjectPresentInCredentials(false) + .setIsQuotaProjectPresentInMetadata(false) .setExpectedQuotaProjectInHeader(null) .build())); } - // Configure necessary behavior on the Grpc mock exporters to work - // TODO: Potential improvement - make this work for Http exporter as well. - private static void configureGrpcMockExporters( + // Configure necessary behavior on the gRPC mock span exporters to work. + // Mockito.lenient is used here because this method is used with parameterized tests where based + // on certain inputs, certain stubbings may not be required. 
+ private static void configureGrpcMockSpanExporter( OtlpGrpcSpanExporter mockGrpcExporter, OtlpGrpcSpanExporterBuilder spyGrpcExporterBuilder, List exportedSpanContainer) { - Mockito.when(spyGrpcExporterBuilder.build()).thenReturn(mockGrpcExporter); - Mockito.when(mockGrpcExporter.shutdown()).thenReturn(CompletableResultCode.ofSuccess()); - Mockito.when(mockGrpcExporter.toBuilder()).thenReturn(spyGrpcExporterBuilder); - Mockito.when(mockGrpcExporter.export(Mockito.anyCollection())) + Mockito.lenient().when(spyGrpcExporterBuilder.build()).thenReturn(mockGrpcExporter); + Mockito.lenient() + .when(mockGrpcExporter.shutdown()) + .thenReturn(CompletableResultCode.ofSuccess()); + Mockito.lenient().when(mockGrpcExporter.toBuilder()).thenReturn(spyGrpcExporterBuilder); + Mockito.lenient() + .when(mockGrpcExporter.export(Mockito.anyCollection())) .thenAnswer( invocationOnMock -> { exportedSpanContainer.addAll(invocationOnMock.getArgument(0)); @@ -361,13 +757,95 @@ private static void configureGrpcMockExporters( }); } + // Configure necessary behavior on the http mock metric exporters to work. + private static void configureHttpMockMetricExporter( + OtlpHttpMetricExporter mockOtlpHttpMetricExporter, + OtlpHttpMetricExporterBuilder spyOtlpHttpMetricExporterBuilder, + List exportedMetricContainer) { + Mockito.when(spyOtlpHttpMetricExporterBuilder.build()).thenReturn(mockOtlpHttpMetricExporter); + Mockito.when(mockOtlpHttpMetricExporter.shutdown()) + .thenReturn(CompletableResultCode.ofSuccess()); + Mockito.when(mockOtlpHttpMetricExporter.toBuilder()) + .thenReturn(spyOtlpHttpMetricExporterBuilder); + Mockito.when(mockOtlpHttpMetricExporter.export(Mockito.anyCollection())) + .thenAnswer( + invocationOnMock -> { + exportedMetricContainer.addAll(invocationOnMock.getArgument(0)); + return CompletableResultCode.ofSuccess(); + }); + // mock the get default aggregation and aggregation temporality - they're required for valid + // metric collection. 
+ Mockito.when(mockOtlpHttpMetricExporter.getDefaultAggregation(Mockito.any())) + .thenAnswer( + (Answer) + invocationOnMock -> { + InstrumentType instrumentType = invocationOnMock.getArgument(0); + return OtlpHttpMetricExporter.getDefault().getDefaultAggregation(instrumentType); + }); + Mockito.when(mockOtlpHttpMetricExporter.getAggregationTemporality(Mockito.any())) + .thenAnswer( + (Answer) + invocationOnMock -> { + InstrumentType instrumentType = invocationOnMock.getArgument(0); + return OtlpHttpMetricExporter.getDefault() + .getAggregationTemporality(instrumentType); + }); + } + + // Configure necessary behavior on the gRPC mock metrics exporters to work. + // Mockito.lenient is used here because this method is used with parameterized tests where based + // on certain inputs, certain stubbings may not be required. + private static void configureGrpcMockMetricExporter( + OtlpGrpcMetricExporter mockOtlpGrpcMetricExporter, + OtlpGrpcMetricExporterBuilder spyOtlpGrpcMetricExporterBuilder, + List exportedMetricContainer) { + Mockito.lenient() + .when(spyOtlpGrpcMetricExporterBuilder.build()) + .thenReturn(mockOtlpGrpcMetricExporter); + Mockito.lenient() + .when(mockOtlpGrpcMetricExporter.shutdown()) + .thenReturn(CompletableResultCode.ofSuccess()); + Mockito.lenient() + .when(mockOtlpGrpcMetricExporter.toBuilder()) + .thenReturn(spyOtlpGrpcMetricExporterBuilder); + Mockito.lenient() + .when(mockOtlpGrpcMetricExporter.export(Mockito.anyCollection())) + .thenAnswer( + invocationOnMock -> { + exportedMetricContainer.addAll(invocationOnMock.getArgument(0)); + return CompletableResultCode.ofSuccess(); + }); + // mock the get default aggregation and aggregation temporality - they're required for valid + // metric collection. 
+ Mockito.lenient() + .when(mockOtlpGrpcMetricExporter.getDefaultAggregation(Mockito.any())) + .thenAnswer( + (Answer) + invocationOnMock -> { + InstrumentType instrumentType = invocationOnMock.getArgument(0); + return OtlpGrpcMetricExporter.getDefault().getDefaultAggregation(instrumentType); + }); + Mockito.lenient() + .when(mockOtlpGrpcMetricExporter.getAggregationTemporality(Mockito.any())) + .thenAnswer( + (Answer) + invocationOnMock -> { + InstrumentType instrumentType = invocationOnMock.getArgument(0); + return OtlpGrpcMetricExporter.getDefault() + .getAggregationTemporality(instrumentType); + }); + Mockito.lenient() + .when(mockOtlpGrpcMetricExporter.getMemoryMode()) + .thenReturn(MemoryMode.IMMUTABLE_DATA); + } + @AutoValue abstract static class QuotaProjectIdTestBehavior { // A null user specified quota represents the use case where user omits specifying quota @Nullable abstract String getUserSpecifiedQuotaProjectId(); - abstract boolean getIsQuotaProjectPresentInCredentials(); + abstract boolean getIsQuotaProjectPresentInMetadata(); // If expected quota project in header is null, the header entry should not be present in export @Nullable @@ -382,52 +860,147 @@ static Builder builder() { abstract static class Builder { abstract Builder setUserSpecifiedQuotaProjectId(String quotaProjectId); - abstract Builder setIsQuotaProjectPresentInCredentials( - boolean quotaProjectPresentInCredentials); + abstract Builder setIsQuotaProjectPresentInMetadata(boolean quotaProjectPresentInMetadata); + /** + * Sets the expected quota project header value for the test case. A null value is allowed, + * and it indicates that the header should not be present in the export request. + * + * @param expectedQuotaProjectInHeader the expected header value to match in the export + * headers. 
+ */ abstract Builder setExpectedQuotaProjectInHeader(String expectedQuotaProjectInHeader); abstract QuotaProjectIdTestBehavior build(); } } + @AutoValue + abstract static class TargetSignalBehavior { + @Nonnull + abstract String getConfiguredTargetSignals(); + + @Nonnull + abstract ImmutableMap getUserSpecifiedOtelProperties(); + + abstract boolean getExpectedIsTraceSignalModified(); + + abstract boolean getExpectedIsMetricsSignalModified(); + + static Builder builder() { + return new AutoValue_GcpAuthAutoConfigurationCustomizerProviderTest_TargetSignalBehavior + .Builder(); + } + + @AutoValue.Builder + abstract static class Builder { + abstract Builder setConfiguredTargetSignals(String targetSignals); + + abstract Builder setUserSpecifiedOtelProperties(Map oTelProperties); + + // Set whether the combination of specified OTel properties and configured target signals + // should lead to modification of the OTLP trace exporters. + abstract Builder setExpectedIsTraceSignalModified(boolean expectedModified); + + // Set whether the combination of specified OTel properties and configured target signals + // should lead to modification of the OTLP metrics exporters. 
+ abstract Builder setExpectedIsMetricsSignalModified(boolean expectedModified); + + abstract TargetSignalBehavior build(); + } + } + + // Mockito.lenient is used here because this method is used with parameterized tests where based + // on certain inputs, certain stubbings may not be required. @SuppressWarnings("CannotMockMethod") private void prepareMockBehaviorForGoogleCredentials() { - Mockito.when(mockedGoogleCredentials.getQuotaProjectId()) - .thenReturn(DUMMY_GCP_QUOTA_PROJECT_ID); - Mockito.when(mockedGoogleCredentials.getAccessToken()) - .thenReturn(new AccessToken("fake", Date.from(Instant.now()))); + AccessToken fakeAccessToken = new AccessToken("fake", Date.from(Instant.now())); + try { + Mockito.lenient() + .when(mockedGoogleCredentials.getRequestMetadata()) + .thenReturn( + ImmutableMap.of( + "Authorization", + Collections.singletonList("Bearer " + fakeAccessToken.getTokenValue()), + QUOTA_USER_PROJECT_HEADER, + Collections.singletonList(DUMMY_GCP_QUOTA_PROJECT_ID))); + } catch (IOException e) { + throw new RuntimeException(e); + } } private OpenTelemetrySdk buildOpenTelemetrySdkWithExporter(SpanExporter spanExporter) { + return buildOpenTelemetrySdkWithExporter( + spanExporter, OtlpHttpMetricExporter.getDefault(), defaultOtelPropertiesSpanExporter); + } + + @SuppressWarnings("UnusedMethod") + private OpenTelemetrySdk buildOpenTelemetrySdkWithExporter( + SpanExporter spanExporter, ImmutableMap customOTelProperties) { + return buildOpenTelemetrySdkWithExporter( + spanExporter, OtlpHttpMetricExporter.getDefault(), customOTelProperties); + } + + private OpenTelemetrySdk buildOpenTelemetrySdkWithExporter(MetricExporter metricExporter) { + return buildOpenTelemetrySdkWithExporter( + OtlpHttpSpanExporter.getDefault(), metricExporter, defaultOtelPropertiesMetricExporter); + } + + @SuppressWarnings("UnusedMethod") + private OpenTelemetrySdk buildOpenTelemetrySdkWithExporter( + MetricExporter metricExporter, ImmutableMap customOtelProperties) { + return buildOpenTelemetrySdkWithExporter( + 
OtlpHttpSpanExporter.getDefault(), metricExporter, customOtelProperties); + } + + private OpenTelemetrySdk buildOpenTelemetrySdkWithExporter( + SpanExporter spanExporter, + MetricExporter metricExporter, + ImmutableMap customOtelProperties) { SpiHelper spiHelper = SpiHelper.create(GcpAuthAutoConfigurationCustomizerProviderTest.class.getClassLoader()); AutoConfiguredOpenTelemetrySdkBuilder builder = - AutoConfiguredOpenTelemetrySdk.builder().addPropertiesSupplier(() -> otelProperties); - AutoConfigureUtil.setComponentLoader( - builder, - new ComponentLoader() { - @SuppressWarnings("unchecked") - @Override - public List load(Class spiClass) { - if (spiClass == ConfigurableSpanExporterProvider.class) { - return Collections.singletonList( - (T) - new ConfigurableSpanExporterProvider() { - @Override - public SpanExporter createExporter(ConfigProperties configProperties) { - return spanExporter; - } - - @Override - public String getName() { - return "otlp"; - } - }); - } - return spiHelper.load(spiClass); - } - }); + AutoConfiguredOpenTelemetrySdk.builder() + .addPropertiesSupplier(() -> customOtelProperties) + .setComponentLoader( + new ComponentLoader() { + @Override + public List load(Class spiClass) { + if (spiClass == ConfigurableSpanExporterProvider.class) { + return Collections.singletonList( + spiClass.cast( + new ConfigurableSpanExporterProvider() { + @Override + public SpanExporter createExporter( + ConfigProperties configProperties) { + return spanExporter; + } + + @Override + public String getName() { + return "otlp"; + } + })); + } + if (spiClass == ConfigurableMetricExporterProvider.class) { + return Collections.singletonList( + spiClass.cast( + new ConfigurableMetricExporterProvider() { + @Override + public MetricExporter createExporter( + ConfigProperties configProperties) { + return metricExporter; + } + + @Override + public String getName() { + return "otlp"; + } + })); + } + return spiHelper.load(spiClass); + } + }); + return 
builder.build().getOpenTelemetrySdk(); } @@ -450,6 +1023,19 @@ private static void generateTestSpan(OpenTelemetrySdk openTelemetrySdk) { } } + private static void generateTestMetric(OpenTelemetrySdk openTelemetrySdk) { + LongCounter longCounter = + openTelemetrySdk + .getMeter("test") + .counterBuilder("sample") + .setDescription("sample counter") + .setUnit("1") + .build(); + long workOutput = busyloop(); + long randomValue = TEST_RANDOM.nextInt(1000); + longCounter.add(randomValue, Attributes.of(AttributeKey.longKey("work_loop"), workOutput)); + } + // loop to simulate work done private static long busyloop() { Instant start = Instant.now(); diff --git a/gcp-auth-extension/src/test/java/io/opentelemetry/contrib/gcp/auth/GcpAuthExtensionEndToEndTest.java b/gcp-auth-extension/src/test/java/io/opentelemetry/contrib/gcp/auth/GcpAuthExtensionEndToEndTest.java index 421d4fcec..f87762474 100644 --- a/gcp-auth-extension/src/test/java/io/opentelemetry/contrib/gcp/auth/GcpAuthExtensionEndToEndTest.java +++ b/gcp-auth-extension/src/test/java/io/opentelemetry/contrib/gcp/auth/GcpAuthExtensionEndToEndTest.java @@ -7,9 +7,8 @@ import static io.opentelemetry.contrib.gcp.auth.GcpAuthAutoConfigurationCustomizerProvider.GCP_USER_PROJECT_ID_KEY; import static io.opentelemetry.contrib.gcp.auth.GcpAuthAutoConfigurationCustomizerProvider.QUOTA_USER_PROJECT_HEADER; +import static org.assertj.core.api.Assertions.assertThat; import static org.awaitility.Awaitility.await; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockserver.model.HttpRequest.request; import static org.mockserver.model.HttpResponse.response; import static org.mockserver.stop.Stop.stopQuietly; @@ -51,7 +50,7 @@ @SpringBootTest( classes = {Application.class}, webEnvironment = WebEnvironment.RANDOM_PORT) -public class GcpAuthExtensionEndToEndTest { +class GcpAuthExtensionEndToEndTest { @LocalServerPort private int 
testApplicationPort; // port at which the spring app is running @@ -89,10 +88,9 @@ public static void setup() throws NoSuchAlgorithmException, KeyManagementExcepti // Set up mock OTLP backend server to which traces will be exported backendServer = ClientAndServer.startClientAndServer(EXPORTER_ENDPOINT_PORT); backendServer.when(request()).respond(response().withStatusCode(200)); - - // Set up the mock gcp metadata server to provide fake credentials String accessTokenResponse = "{\"access_token\": \"fake.access_token\",\"expires_in\": 3600, \"token_type\": \"Bearer\"}"; + mockGcpOAuth2Server = ClientAndServer.startClientAndServer(MOCK_GCP_OAUTH2_PORT); MockServerClient mockServerClient = @@ -116,7 +114,7 @@ public static void teardown() { } @Test - public void authExtensionSmokeTest() { + void authExtensionSmokeTest() { template.getForEntity( URI.create("http://localhost:" + testApplicationPort + "/ping"), String.class); @@ -161,24 +159,22 @@ public X509Certificate[] getAcceptedIssuers() { private static void verifyResourceAttributes(List extractedResourceSpans) { extractedResourceSpans.forEach( resourceSpan -> - assertTrue( - resourceSpan - .getResource() - .getAttributesList() - .contains( - KeyValue.newBuilder() - .setKey(GCP_USER_PROJECT_ID_KEY) - .setValue(AnyValue.newBuilder().setStringValue(DUMMY_GCP_PROJECT)) - .build()))); + assertThat(resourceSpan.getResource().getAttributesList()) + .contains( + KeyValue.newBuilder() + .setKey(GCP_USER_PROJECT_ID_KEY) + .setValue(AnyValue.newBuilder().setStringValue(DUMMY_GCP_PROJECT)) + .build())); } private static void verifyRequestHeaders(List extractedHeaders) { - assertFalse(extractedHeaders.isEmpty()); + assertThat(extractedHeaders).isNotEmpty(); // verify if extension added the required headers extractedHeaders.forEach( headers -> { - assertTrue(headers.containsEntry(QUOTA_USER_PROJECT_HEADER, DUMMY_GCP_QUOTA_PROJECT)); - assertTrue(headers.containsEntry("Authorization", "Bearer fake.access_token")); + 
assertThat(headers.containsEntry(QUOTA_USER_PROJECT_HEADER, DUMMY_GCP_QUOTA_PROJECT)) + .isTrue(); + assertThat(headers.containsEntry("Authorization", "Bearer fake.access_token")).isTrue(); }); } diff --git a/gcp-auth-extension/src/test/resources/fake_user_creds.json b/gcp-auth-extension/src/test/resources/fake_user_creds.json new file mode 100644 index 000000000..fd798897f --- /dev/null +++ b/gcp-auth-extension/src/test/resources/fake_user_creds.json @@ -0,0 +1,7 @@ +{ + "client_id": "....apps.googleusercontent.com", + "client_secret": "...", + "refresh_token": "1//...", + "quota_project_id": "your-configured-quota-project", + "type": "authorized_user" +} diff --git a/gcp-auth-extension/src/test/resources/fakecreds.json b/gcp-auth-extension/src/test/resources/fakecreds.json deleted file mode 100644 index 1000f70db..000000000 --- a/gcp-auth-extension/src/test/resources/fakecreds.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "type": "service_account", - "project_id": "quota-project-id", - "private_key_id": "aljmafmlamlmmasma", - "private_key": "-----BEGIN PRIVATE KEY-----\nMIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBALX0PQoe1igW12ikv1bN/r9lN749y2ijmbc/mFHPyS3hNTyOCjDvBbXYbDhQJzWVUikh4mvGBA07qTj79Xc3yBDfKP2IeyYQIFe0t0zkd7R9Zdn98Y2rIQC47aAbDfubtkU1U72t4zL11kHvoa0/RuFZjncvlr42X7be7lYh4p3NAgMBAAECgYASk5wDw4Az2ZkmeuN6Fk/y9H+Lcb2pskJIXjrL533vrDWGOC48LrsThMQPv8cxBky8HFSEklPpkfTF95tpD43iVwJRB/GrCtGTw65IfJ4/tI09h6zGc4yqvIo1cHX/LQ+SxKLGyir/dQM925rGt/VojxY5ryJR7GLbCzxPnJm/oQJBANwOCO6D2hy1LQYJhXh7O+RLtA/tSnT1xyMQsGT+uUCMiKS2bSKx2wxo9k7h3OegNJIu1q6nZ6AbxDK8H3+d0dUCQQDTrPSXagBxzp8PecbaCHjzNRSQE2in81qYnrAFNB4o3DpHyMMY6s5ALLeHKscEWnqP8Ur6X4PvzZecCWU9BKAZAkAutLPknAuxSCsUOvUfS1i87ex77Ot+w6POp34pEX+UWb+u5iFn2cQacDTHLV1LtE80L8jVLSbrbrlH43H0DjU5AkEAgidhycxS86dxpEljnOMCw8CKoUBd5I880IUahEiUltk7OLJYS/Ts1wbn3kPOVX3wyJs8WBDtBkFrDHW2ezth2QJADj3e1YhMVdjJW5jqwlD/VNddGjgzyunmiZg0uOXsHXbytYmsA545S8KRQFaJKFXYYFo2kOjqOiC1T2cAzMDjCQ==\n-----END PRIVATE KEY-----\n", - "client_email": 
"sample@appspot.gserviceaccount.com", - "client_id": "100000000000000000221", - "auth_uri": "https://accounts.google.com/o/oauth2/auth", - "token_uri": "https://oauth2.googleapis.com/token", - "auth_provider_x509_cert_url": "", - "client_x509_cert_url": "", - "universe_domain": "googleapis.com" -} diff --git a/gcp-resources/README.md b/gcp-resources/README.md index 2305c0e21..9f670e088 100644 --- a/gcp-resources/README.md +++ b/gcp-resources/README.md @@ -47,7 +47,7 @@ env: ## Usage with Manual Instrumentation -It is recommended to use this resource detector with the [OpenTelemetry Autoconfiguration SPI](https://github.com/open-telemetry/opentelemetry-java/blob/main/sdk-extensions/autoconfigure/README.md#resource-provider-spi). The GCP resource detector automatically provides the detected resources via the [autoconfigure-spi](https://github.com/open-telemetry/opentelemetry-java/tree/main/sdk-extensions/autoconfigure-spi) SDK extension. +It is recommended to use this resource detector with the [OpenTelemetry SDK autoconfiguration](https://opentelemetry.io/docs/languages/java/configuration/#zero-code-sdk-autoconfigure). The GCP resource detector automatically provides the detected resources via the [autoconfigure-spi](https://opentelemetry.io/docs/languages/java/configuration/#spi-service-provider-interface) SDK extension. For a reference example showcasing the detected resource attributes and usage with `autoconfigure-spi`, see the [Resource detection example](https://github.com/open-telemetry/opentelemetry-java-examples/tree/main/resource-detection-gcp). @@ -55,7 +55,7 @@ For a reference example showcasing the detected resource attributes and usage wi With the release of [v2.2.0 of the OpenTelemetry Java Instrumentation](https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/tag/v2.2.0), the GCP resource detector is now included with the Java agent. 
-For users of Java Agent v2.2.0 and later, the GCP resource detectors can be enabled by following the instructions provided [here](https://opentelemetry.io/docs/languages/java/automatic/configuration/#enable-resource-providers-that-are-disabled-by-default). +For users of Java Agent v2.2.0 and later, the GCP resource detectors can be enabled by following the instructions provided in the [agent configuration documentation](https://opentelemetry.io/docs/languages/java/automatic/configuration/#enable-resource-providers-that-are-disabled-by-default). ## Component Owners diff --git a/gcp-resources/build.gradle.kts b/gcp-resources/build.gradle.kts index 2808f9ae2..cc227ed6e 100644 --- a/gcp-resources/build.gradle.kts +++ b/gcp-resources/build.gradle.kts @@ -9,10 +9,11 @@ otelJava.moduleName.set("io.opentelemetry.contrib.gcp.resource") dependencies { api("io.opentelemetry:opentelemetry-api") + compileOnly("io.opentelemetry:opentelemetry-api-incubator") api("io.opentelemetry:opentelemetry-sdk") // Provides GCP resource detection support - implementation("com.google.cloud.opentelemetry:detector-resources-support:0.34.0") + implementation("com.google.cloud.opentelemetry:detector-resources-support:0.36.0") testImplementation("io.opentelemetry.semconv:opentelemetry-semconv-incubating") @@ -21,6 +22,7 @@ dependencies { implementation("com.fasterxml.jackson.core:jackson-core") testImplementation("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure") + testImplementation("io.opentelemetry:opentelemetry-api-incubator") testImplementation("io.opentelemetry:opentelemetry-sdk-testing") testImplementation("org.mockito:mockito-core") diff --git a/gcp-resources/src/main/java/io/opentelemetry/contrib/gcp/resource/GCPResourceProvider.java b/gcp-resources/src/main/java/io/opentelemetry/contrib/gcp/resource/GCPResourceProvider.java index b7e49c1a9..4b92ade57 100644 --- a/gcp-resources/src/main/java/io/opentelemetry/contrib/gcp/resource/GCPResourceProvider.java +++ 
b/gcp-resources/src/main/java/io/opentelemetry/contrib/gcp/resource/GCPResourceProvider.java @@ -65,6 +65,7 @@ public class GCPResourceProvider implements ConditionalResourceProvider { private static final Logger LOGGER = Logger.getLogger(GCPResourceProvider.class.getSimpleName()); + private final GCPPlatformDetector detector; // for testing only diff --git a/gcp-resources/src/main/java/io/opentelemetry/contrib/gcp/resource/IncubatingAttributes.java b/gcp-resources/src/main/java/io/opentelemetry/contrib/gcp/resource/IncubatingAttributes.java index 745d440fd..2f0f937f0 100644 --- a/gcp-resources/src/main/java/io/opentelemetry/contrib/gcp/resource/IncubatingAttributes.java +++ b/gcp-resources/src/main/java/io/opentelemetry/contrib/gcp/resource/IncubatingAttributes.java @@ -13,8 +13,6 @@ */ class IncubatingAttributes { - private IncubatingAttributes() {} - public static final AttributeKey CLOUD_ACCOUNT_ID = AttributeKey.stringKey("cloud.account.id"); public static final AttributeKey CLOUD_AVAILABILITY_ZONE = @@ -26,7 +24,6 @@ private IncubatingAttributes() {} public static final AttributeKey CLOUD_REGION = AttributeKey.stringKey("cloud.region"); public static final class CloudPlatformIncubatingValues { - private CloudPlatformIncubatingValues() {} public static final String GCP_COMPUTE_ENGINE = "gcp_compute_engine"; public static final String GCP_CLOUD_RUN = "gcp_cloud_run"; @@ -34,6 +31,8 @@ private CloudPlatformIncubatingValues() {} public static final String GCP_CLOUD_FUNCTIONS = "gcp_cloud_functions"; public static final String GCP_APP_ENGINE = "gcp_app_engine"; public static final String GCP = "gcp"; + + private CloudPlatformIncubatingValues() {} } public static final AttributeKey FAAS_INSTANCE = AttributeKey.stringKey("faas.instance"); @@ -56,4 +55,6 @@ private CloudPlatformIncubatingValues() {} public static final AttributeKey K8S_CLUSTER_NAME = AttributeKey.stringKey("k8s.cluster.name"); + + private IncubatingAttributes() {} } diff --git 
a/gcp-resources/src/main/java/io/opentelemetry/contrib/gcp/resource/internal/GcpResourceDetector.java b/gcp-resources/src/main/java/io/opentelemetry/contrib/gcp/resource/internal/GcpResourceDetector.java new file mode 100644 index 000000000..35adbeded --- /dev/null +++ b/gcp-resources/src/main/java/io/opentelemetry/contrib/gcp/resource/internal/GcpResourceDetector.java @@ -0,0 +1,32 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.gcp.resource.internal; + +import io.opentelemetry.api.incubator.config.DeclarativeConfigProperties; +import io.opentelemetry.contrib.gcp.resource.GCPResourceProvider; +import io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider; +import io.opentelemetry.sdk.resources.Resource; +import io.opentelemetry.sdk.resources.ResourceBuilder; + +public class GcpResourceDetector implements ComponentProvider { + + @Override + public Class getType() { + return Resource.class; + } + + @Override + public String getName() { + return "gcp"; + } + + @Override + public Resource create(DeclarativeConfigProperties config) { + ResourceBuilder builder = Resource.builder(); + builder.putAll(new GCPResourceProvider().getAttributes()); + return builder.build(); + } +} diff --git a/gcp-resources/src/main/resources/META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider b/gcp-resources/src/main/resources/META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider new file mode 100644 index 000000000..e65bbc840 --- /dev/null +++ b/gcp-resources/src/main/resources/META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider @@ -0,0 +1 @@ +io.opentelemetry.contrib.gcp.resource.internal.GcpResourceDetector diff --git a/gcp-resources/src/test/java/io/opentelemetry/contrib/gcp/resource/GCPResourceProviderTest.java b/gcp-resources/src/test/java/io/opentelemetry/contrib/gcp/resource/GCPResourceProviderTest.java 
index 9b17b22d2..9314a1b67 100644 --- a/gcp-resources/src/test/java/io/opentelemetry/contrib/gcp/resource/GCPResourceProviderTest.java +++ b/gcp-resources/src/test/java/io/opentelemetry/contrib/gcp/resource/GCPResourceProviderTest.java @@ -52,7 +52,7 @@ import static io.opentelemetry.semconv.incubating.HostIncubatingAttributes.HOST_NAME; import static io.opentelemetry.semconv.incubating.HostIncubatingAttributes.HOST_TYPE; import static io.opentelemetry.semconv.incubating.K8sIncubatingAttributes.K8S_CLUSTER_NAME; -import static org.junit.jupiter.api.Assertions.fail; +import static org.assertj.core.api.Assertions.fail; import static org.mockito.Mockito.verify; import com.google.cloud.opentelemetry.detection.DetectedPlatform; @@ -186,7 +186,7 @@ private static DetectedPlatform generateMockUnknownPlatform() { } @Test - public void testGceResourceAttributesMapping() { + void testGceResourceAttributesMapping() { GCPPlatformDetector mockDetector = Mockito.mock(GCPPlatformDetector.class); DetectedPlatform mockPlatform = generateMockGcePlatform(); Mockito.when(mockDetector.detectPlatform()).thenReturn(mockPlatform); @@ -210,7 +210,7 @@ public void testGceResourceAttributesMapping() { } @Test - public void testGkeResourceAttributesMapping_LocationTypeRegion() { + void testGkeResourceAttributesMapping_LocationTypeRegion() { GCPPlatformDetector mockDetector = Mockito.mock(GCPPlatformDetector.class); DetectedPlatform mockPlatform = generateMockGkePlatform(GKE_LOCATION_TYPE_REGION); Mockito.when(mockDetector.detectPlatform()).thenReturn(mockPlatform); @@ -227,7 +227,7 @@ public void testGkeResourceAttributesMapping_LocationTypeRegion() { } @Test - public void testGkeResourceAttributesMapping_LocationTypeZone() { + void testGkeResourceAttributesMapping_LocationTypeZone() { GCPPlatformDetector mockDetector = Mockito.mock(GCPPlatformDetector.class); DetectedPlatform mockPlatform = generateMockGkePlatform(GKE_LOCATION_TYPE_ZONE); 
Mockito.when(mockDetector.detectPlatform()).thenReturn(mockPlatform); @@ -245,7 +245,7 @@ public void testGkeResourceAttributesMapping_LocationTypeZone() { } @Test - public void testGkeResourceAttributesMapping_LocationTypeInvalid() { + void testGkeResourceAttributesMapping_LocationTypeInvalid() { Map mockGKEAttributes = new HashMap<>(mockGKECommonAttributes); mockGKEAttributes.put(GKE_CLUSTER_LOCATION_TYPE, "INVALID"); mockGKEAttributes.put(GKE_CLUSTER_LOCATION, "some-location"); @@ -270,7 +270,7 @@ public void testGkeResourceAttributesMapping_LocationTypeInvalid() { } @Test - public void testGkeResourceAttributesMapping_LocationMissing() { + void testGkeResourceAttributesMapping_LocationMissing() { GCPPlatformDetector mockDetector = Mockito.mock(GCPPlatformDetector.class); DetectedPlatform mockPlatform = generateMockGkePlatform(""); Mockito.when(mockDetector.detectPlatform()).thenReturn(mockPlatform); @@ -295,7 +295,7 @@ private static void verifyGkeMapping(Resource gotResource, DetectedPlatform dete } @Test - public void testGcrServiceResourceAttributesMapping() { + void testGcrServiceResourceAttributesMapping() { GCPPlatformDetector mockDetector = Mockito.mock(GCPPlatformDetector.class); DetectedPlatform mockPlatform = generateMockServerlessPlatform(GCPPlatformDetector.SupportedPlatform.GOOGLE_CLOUD_RUN); @@ -312,7 +312,7 @@ public void testGcrServiceResourceAttributesMapping() { } @Test - public void testGcfResourceAttributeMapping() { + void testGcfResourceAttributeMapping() { GCPPlatformDetector mockDetector = Mockito.mock(GCPPlatformDetector.class); DetectedPlatform mockPlatform = generateMockServerlessPlatform( @@ -343,7 +343,7 @@ private static void verifyServerlessMapping( } @Test - public void testGcrJobResourceAttributesMapping() { + void testGcrJobResourceAttributesMapping() { GCPPlatformDetector mockDetector = Mockito.mock(GCPPlatformDetector.class); DetectedPlatform mockPlatform = generateMockGcrJobPlatform(); 
Mockito.when(mockDetector.detectPlatform()).thenReturn(mockPlatform); @@ -368,7 +368,7 @@ public void testGcrJobResourceAttributesMapping() { } @Test - public void testGaeResourceAttributeMapping() { + void testGaeResourceAttributeMapping() { GCPPlatformDetector mockDetector = Mockito.mock(GCPPlatformDetector.class); DetectedPlatform mockPlatform = generateMockGaePlatform(); Mockito.when(mockDetector.detectPlatform()).thenReturn(mockPlatform); @@ -390,7 +390,7 @@ public void testGaeResourceAttributeMapping() { } @Test - public void testUnknownPlatformResourceAttributesMapping() { + void testUnknownPlatformResourceAttributesMapping() { GCPPlatformDetector mockDetector = Mockito.mock(GCPPlatformDetector.class); DetectedPlatform mockPlatform = generateMockUnknownPlatform(); Mockito.when(mockDetector.detectPlatform()).thenReturn(mockPlatform); @@ -400,7 +400,7 @@ public void testUnknownPlatformResourceAttributesMapping() { } @Test - public void findsWithServiceLoader() { + void findsWithServiceLoader() { ServiceLoader services = ServiceLoader.load(ResourceProvider.class, getClass().getClassLoader()); while (services.iterator().hasNext()) { diff --git a/gcp-resources/src/test/java/io/opentelemetry/contrib/gcp/resource/ResourceComponentProviderTest.java b/gcp-resources/src/test/java/io/opentelemetry/contrib/gcp/resource/ResourceComponentProviderTest.java new file mode 100644 index 000000000..b80d6c427 --- /dev/null +++ b/gcp-resources/src/test/java/io/opentelemetry/contrib/gcp/resource/ResourceComponentProviderTest.java @@ -0,0 +1,24 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.gcp.resource; + +import static org.assertj.core.api.Assertions.assertThat; + +import io.opentelemetry.common.ComponentLoader; +import io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider; +import org.junit.jupiter.api.Test; + +class ResourceComponentProviderTest { + + @Test + 
@SuppressWarnings("rawtypes") + void providerIsLoaded() { + Iterable providers = + ComponentLoader.forClassLoader(ResourceComponentProviderTest.class.getClassLoader()) + .load(ComponentProvider.class); + assertThat(providers).extracting(ComponentProvider::getName).containsExactly("gcp"); + } +} diff --git a/gradle.properties b/gradle.properties index ef53d84af..bc596d5e4 100644 --- a/gradle.properties +++ b/gradle.properties @@ -1,11 +1,9 @@ org.gradle.parallel=true org.gradle.caching=true +org.gradle.configuration-cache=true +org.gradle.configuration-cache.parallel=true org.gradle.priority=low # Gradle default is 256m which causes issues with our build - https://docs.gradle.org/current/userguide/build_environment.html#sec:configuring_jvm_memory org.gradle.jvmargs=-XX:MaxMetaspaceSize=512m - -# Workaround https://youtrack.jetbrains.com/issue/KT-47152 -# We don't have enough kotlin code to care about incremental compilation anyways. -kotlin.incremental=false diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index 9bbc975c7..8bdaf60c7 100644 Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 36e4933e1..6a38a8cea 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,7 +1,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionSha256Sum=20f1b1176237254a6fc204d8434196fa11a4cfb387567519c61556e8710aed78 -distributionUrl=https\://services.gradle.org/distributions/gradle-8.13-bin.zip +distributionSha256Sum=a17ddd85a26b6a7f5ddb71ff8b05fc5104c0202c6e64782429790c933686c806 +distributionUrl=https\://services.gradle.org/distributions/gradle-9.1.0-bin.zip networkTimeout=10000 validateDistributionUrl=true zipStoreBase=GRADLE_USER_HOME diff --git a/gradlew b/gradlew index faf93008b..adff685a0 100755 --- a/gradlew +++ 
b/gradlew @@ -1,7 +1,7 @@ #!/bin/sh # -# Copyright © 2015-2021 the original authors. +# Copyright © 2015 the original authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -114,7 +114,6 @@ case "$( uname )" in #( NONSTOP* ) nonstop=true ;; esac -CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar # Determine the Java command to use to start the JVM. @@ -172,7 +171,6 @@ fi # For Cygwin or MSYS, switch paths to Windows format before running java if "$cygwin" || "$msys" ; then APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) - CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) JAVACMD=$( cygpath --unix "$JAVACMD" ) @@ -212,8 +210,7 @@ DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' set -- \ "-Dorg.gradle.appname=$APP_BASE_NAME" \ - -classpath "$CLASSPATH" \ - org.gradle.wrapper.GradleWrapperMain \ + -jar "$APP_HOME/gradle/wrapper/gradle-wrapper.jar" \ "$@" # Stop when "xargs" is not available. diff --git a/gradlew.bat b/gradlew.bat index 9d21a2183..c4bdd3ab8 100644 --- a/gradlew.bat +++ b/gradlew.bat @@ -70,11 +70,10 @@ goto fail :execute @rem Setup the command line -set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar @rem Execute Gradle -"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -jar "%APP_HOME%\gradle\wrapper\gradle-wrapper.jar" %* :end @rem End local scope for the variables with windows NT shell diff --git a/ibm-mq-metrics/Makefile b/ibm-mq-metrics/Makefile new file mode 100644 index 000000000..d6ad8e42a --- /dev/null +++ b/ibm-mq-metrics/Makefile @@ -0,0 +1,84 @@ +# From where to resolve the containers (e.g. "otel/weaver"). +WEAVER_CONTAINER_REPOSITORY=docker.io +# Versioned, non-qualified references to containers used in this Makefile. 
+# These are parsed from dependencies.Dockerfile so dependabot will autoupdate +# the versions of docker files we use. +VERSIONED_WEAVER_CONTAINER_NO_REPO=$(shell cat weaver.Dockerfile | awk '$$4=="weaver" {print $$2}') +# Versioned, non-qualified references to containers used in this Makefile. +WEAVER_CONTAINER=$(WEAVER_CONTAINER_REPOSITORY)/$(VERSIONED_WEAVER_CONTAINER_NO_REPO) + +# Next - we want to run docker as our local file user, so generated code is not +# owned by root, and we don't give unnecessary access. +# +# Determine if "docker" is actually podman +DOCKER_VERSION_OUTPUT := $(shell docker --version 2>&1) +DOCKER_IS_PODMAN := $(shell echo $(DOCKER_VERSION_OUTPUT) | grep -c podman) +ifeq ($(DOCKER_IS_PODMAN),0) + DOCKER_COMMAND := docker +else + DOCKER_COMMAND := podman +endif +DOCKER_RUN=$(DOCKER_COMMAND) run +DOCKER_USER=$(shell id -u):$(shell id -g) +DOCKER_USER_IS_HOST_USER_ARG=-u $(DOCKER_USER) +ifeq ($(DOCKER_COMMAND),podman) + # On podman, additional arguments are needed to make "-u" work + # correctly with the host user ID and host group ID. 
+ # + # Error: OCI runtime error: crun: setgroups: Invalid argument + DOCKER_USER_IS_HOST_USER_ARG=--userns=keep-id -u $(DOCKER_USER) +endif + +.PHONY: generate-docs +generate-docs: + mkdir -p docs + $(DOCKER_RUN) --rm \ + $(DOCKER_USER_IS_HOST_USER_ARG) \ + --mount 'type=bind,source=$(PWD)/model,target=/home/weaver/model,readonly' \ + --mount 'type=bind,source=$(PWD)/templates,target=/home/weaver/templates,readonly' \ + --mount 'type=bind,source=$(PWD)/docs,target=/home/weaver/target' \ + ${WEAVER_CONTAINER} registry generate \ + --registry=/home/weaver/model \ + markdown \ + --future \ + /home/weaver/target + +.PHONY: check +check: + $(DOCKER_RUN) --rm \ + $(DOCKER_USER_IS_HOST_USER_ARG) \ + --mount 'type=bind,source=$(PWD)/model,target=/home/weaver/model,readonly' \ + --mount 'type=bind,source=$(PWD)/templates,target=/home/weaver/templates,readonly' \ + --mount 'type=bind,source=$(PWD)/docs,target=/home/weaver/target' \ + ${WEAVER_CONTAINER} registry check \ + --registry=/home/weaver/model + +.PHONY: generate-java +generate-java: + mkdir -p src/main/java/io/opentelemetry/ibm/mq/metrics + $(DOCKER_RUN) --rm \ + $(DOCKER_USER_IS_HOST_USER_ARG) \ + --mount 'type=bind,source=$(PWD)/model,target=/home/weaver/model,readonly' \ + --mount 'type=bind,source=$(PWD)/templates,target=/home/weaver/templates,readonly' \ + --mount 'type=bind,source=$(PWD)/src/main/java/io/opentelemetry/ibm/mq/metrics,target=/home/weaver/target' \ + ${WEAVER_CONTAINER} registry generate \ + --registry=/home/weaver/model \ + java \ + --future \ + /home/weaver/target + +.PHONY: generate-yaml +generate-yaml: + $(DOCKER_RUN) --rm \ + $(DOCKER_USER_IS_HOST_USER_ARG) \ + --mount 'type=bind,source=$(PWD)/model,target=/home/weaver/model,readonly' \ + --mount 'type=bind,source=$(PWD)/templates,target=/home/weaver/templates,readonly' \ + --mount 'type=bind,source=$(PWD)/,target=/home/weaver/target' \ + ${WEAVER_CONTAINER} registry generate \ + --registry=/home/weaver/model \ + yaml \ + --future \ + 
/home/weaver/target + +.PHONY: generate +generate: generate-docs generate-yaml generate-java diff --git a/ibm-mq-metrics/README.md b/ibm-mq-metrics/README.md new file mode 100644 index 000000000..ced5c3530 --- /dev/null +++ b/ibm-mq-metrics/README.md @@ -0,0 +1,238 @@ +# IBM MQ Metrics + +:warning: This software is under development. + +## Use case + +IBM MQ, formerly known as WebSphere MQ (message queue) series, is an IBM software for +program-to-program messaging across multiple platforms. + +The IBM MQ metrics utility here can monitor multiple queues managers and their resources, +namely queues, topics, channels and listeners The metrics are extracted out using the +[PCF command messages](https://www.ibm.com/docs/en/ibm-mq/8.0.0?topic=tasks-introduction-programmable-command-formats). + +The metrics for queue manager, queue, topic, channel and listener can be configured. + +The MQ Monitor is compatible with IBM MQ version 7.x, 8.x and 9.x. + +## Prerequisites + +This software requires compilation with Java 11. +It targets language level 8 and outputs java 8 class files. + +The extension has a dependency on the following jar's depending on IBM MQ version: + +* v8.0.0 and above + +``` +com.ibm.mq.allclient.jar +``` + +* For other versions + +``` +com.ibm.mq.commonservices.jar +com.ibm.mq.jar +com.ibm.mq.jmqi.jar +com.ibm.mq.headers.jar +com.ibm.mq.pcf.jar +dhbcore.jar +connector.jar +``` + +These jar files are typically found in ```/opt/mqm/java/lib``` on a UNIX server but may be +found in an alternate location depending upon your environment. + +In case of **CLIENT** transport type, IBM MQ Client must be installed to get the MQ jars. +[The IBM MQ Client jars can be downloaded here](https://developer.ibm.com/messaging/mq-downloads/). + +### MQ monitoring configuration + +This software reads events from event queues associated with the queue manager: + +* `SYSTEM.ADMIN.PERFM.EVENT`: Performance events, such as low, high, and full queue depth events. 
+* `SYSTEM.ADMIN.QMGR.EVENT`: Authority events +* `SYSTEM.ADMIN.CONFIG.EVENT`: Configuration events + +Please turn on those events to take advantage of this monitoring. + +## Build + +Build the package with: + +```shell +cd ibm-mq-metrics +../gradlew shadowJar +``` + +Note: Due to restrictive licensing, this uber-jar (fat-jar) does not include the IBM client jar. + +## Run + +Run the standalone jar alongside the IBM jar: + +```shell +cd ibm-mq-metrics +java \ + -Djavax.net.ssl.keyStore=key.jks \ + -Djavax.net.ssl.keyStorePassword= \ + -Djavax.net.ssl.trustStore=key.jks \ + -Djavax.net.ssl.trustStorePassword= \ + -cp build/libs/opentelemetry-ibm-mq-monitoring--all.jar:lib/com.ibm.mq.allclient.jar \ + io.opentelemetry.ibm.mq.opentelemetry.Main \ + ./my-config.yml +``` + +## Generate code with Weaver + +Weaver generates code, documentation and configuration for this program. + +```shell +make generate +``` + +This generates `config.yaml`, the `docs` folder, the `src/main/java/io/opentelemetry/ibm/mq/metrics` +Java code folder. + +## Connection + +There are two transport modes in which this extension can be run: + +* **Binding** : Requires WMQ Extension to be deployed in machine agent on the same machine where + WMQ server is installed. +* **Client** : In this mode, the WMQ extension is installed on a different host than the IBM MQ + server. Please install the [IBM MQ Client](https://developer.ibm.com/messaging/mq-downloads/) + for this mode to get the necessary jars as mentioned previously. + +If this extension is configured for **CLIENT** transport type + +1. Please make sure the MQ's host and port is accessible. +2. Credentials of user with correct access rights would be needed in config.yml. +3. If the hosting OS for IBM MQ is Windows, Windows user credentials will be needed. + +If you are in **Bindings** mode, please make sure to start the MA process under a user which has +the following permissions on the broker. 
Similarly, for **Client** mode, please provide the user +credentials in config.yml which have permissions listed below. + +The user connecting to the queueManager should have the inquire, get, put (since PCF responses +cause dynamic queues to be created) permissions. For metrics that execute MQCMD_RESET_Q_STATS +command, chg permission is needed. + +### SSL Support + +_Note: The following is only needed for versions of Java 8 before 8u161._ + +1. Configure the IBM SSL Cipher Suite in the config.yml. + Note that, to use some CipherSuites the unrestricted policy needs to be configured in JRE. + Please visit [this link](http://www.ibm.com/support/knowledgecenter/SSYKE2_8.0.0/com.ibm.java.security.component.80.doc/security-component/sdkpolicyfiles.html) + for more details. For Oracle JRE, please update with [JCE Unlimited Strength Jurisdiction Policy](http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html). + The download includes a readme file with instructions on how to apply these files to JRE. + +2. Please add the following JVM arguments to the MA start up command or script. + + ```-Dcom.ibm.mq.cfg.useIBMCipherMappings=false``` (If you are using IBM Cipher Suites, set the + flag to true. Please visit [this link](https://www.ibm.com/docs/en/ibm-mq/8.0.0?topic=java-ssltls-cipherspecs-ciphersuites-in-mq-classes) for more details. + ) +3. To configure SSL, the MA's trust store and keystore needs to be setup with the JKS filepath. + They can be passed either as Machine Agent JVM arguments or configured in config.yml (sslConnection)
+ + a. Machine Agent JVM arguments as follows: + + ```-Djavax.net.ssl.trustStore=<path to truststore JKS file>```
+ ```-Djavax.net.ssl.trustStorePassword=<truststore password>```
+ ```-Djavax.net.ssl.keyStore=<path to keystore JKS file>```
+ ```-Djavax.net.ssl.keyStorePassword=<keystore password>```
+ + b. sslConnection in config.yml, configure the trustStorePassword. Same holds for keyStore configuration as well. + + ``` + sslConnection: + trustStorePath: "" + trustStorePassword: "" + + keyStorePath: "" + keyStorePassword: "" + ``` + +## Configuration + +**Note** : Please make sure to not use tab (\t) while editing yaml files. You may want to validate +the yaml file using a [yaml validator](https://jsonformatter.org/yaml-validator). Configure the monitor by copying and editing the +config.yml file. + +1. Configure the queueManagers with appropriate fields and filters. You can configure multiple + queue managers in one configuration file. +2. To run the extension at a frequency > 1 minute, please configure the taskSchedule section. + +### Monitoring Workings - Internals + +This software extracts metrics through [PCF framework](https://www.ibm.com/docs/en/ibm-mq/8.0.0?topic=tasks-introduction-programmable-command-formats). +[A complete list of PCF commands are listed here](https://www.ibm.com/support/knowledgecenter/SSFKSJ_7.5.0/com.ibm.mq.ref.adm.doc/q086870_.htm). +Each queue manager has an administration queue with a standard queue name and +the extension sends PCF command messages to that queue. On Windows and Unix platforms, the PCF +commands are sent is always sent to the SYSTEM.ADMIN.COMMAND.QUEUE queue. +[More details mentioned here](https://www.ibm.com/docs/en/ibm-mq/8.0.0?topic=formats-pcf-command-messages) + +By default, the PCF responses are sent to the SYSTEM.DEFAULT.MODEL.QUEUE. Using this queue causes +a temporary dynamic queue to be created. You can override the default here by using the +`modelQueueName` and `replyQueuePrefix` fields in the config.yml. +[More details mentioned here](https://www.ibm.com/support/knowledgecenter/SSFKSJ_7.5.0/com.ibm.mq.ref.adm.doc/q083240_.htm) + +## Metrics + +See [docs/metrics.md](docs/metrics.md). + +## Troubleshooting + +1. 
Error `Completion Code '2', Reason '2495'` + Normally this error occurs if the environment variables are not set up correctly for this extension to work with MQ in Bindings Mode. + + If you are seeing `Failed to load the WebSphere MQ native JNI library: 'mqjbnd'`, please add the following jvm argument when starting the MA. + + -Djava.library.path=\ For eg. on Unix it could be -Djava.library.path=/opt/mqm/java/lib64 for 64-bit or -Djava.library.path=/opt/mqm/java/lib for 32-bit OS + + Sometimes you also have to run the setmqenv script before using the above jvm argument to start the machine agent. + + . /opt/mqm/bin/setmqenv -s + + For more details, please check this [doc](https://www.ibm.com/support/knowledgecenter/en/SSFKSJ_7.1.0/com.ibm.mq.doc/zr00610_.htm) + + This might occur due to various reasons ranging from incorrect installation to applying + IBM Fix Packs, but most of the time it happens when you are trying to connect in `Bindings` + mode and machine agent is not on the same machine on which WMQ server is running. If you want + to connect to WMQ server from a remote machine then connect using `Client` mode. + + Another way to get around this issue is to avoid using the Bindings mode. Connect using CLIENT + transport type from a remote box. + +2. Error `Completion Code '2', Reason '2035'` + This could happen for various reasons but for most of the cases, for **Client** mode the + user specified in config.yml is not authorized to access the queue manager. Also sometimes + even if userid and password are correct, channel auth (CHLAUTH) for that queue manager blocks + traffic from other IPs, you need to contact admin to provide you access to the queue manager. + For Bindings mode, please make sure that the MA is owned by a mqm user. + +3. `MQJE001: Completion Code '2', Reason '2195'` + This could happen in **Client** mode. Please make sure that the IBM MQ dependency jars are correctly referenced in classpath of monitor.xml + +4. 
`MQJE001: Completion Code '2', Reason '2400'` + This could happen if an unsupported cipherSuite is provided, or the JRE does not have the unlimited jurisdiction policy files enabled. Please check SSL Support section. + +5. If you are seeing `NoClassDefFoundError` or `ClassNotFoundException` error for any of the MQ dependencies even after providing correct path in monitor.xml, then you can also try copying all the required jars into the WMQMonitor (MAHome/monitors/WMQMonitor) folder and provide classpath in monitor.xml like below + + ``` + opentelemetry-ibm-mq-monitoring--all.jar;com.ibm.mq.allclient.jar + ``` + + OR + + ``` + opentelemetry-ibm-mq-monitoring--all.jar;com.ibm.mq.jar;com.ibm.mq.jmqi.jar;com.ibm.mq.commonservices.jar;com.ibm.mq.headers.jar;com.ibm.mq.pcf.jar;connector.jar;dhbcore.jar + ``` + +## Component Owners + +- [Antoine Toulme Sharma](https://github.com/atoulme), Splunk +- [Jason Plumb](https://github.com/breedx-splk), Splunk + +Learn more about component owners in [component_owners.yml](../.github.amrom.workers.devponent_owners.yml). 
diff --git a/ibm-mq-metrics/build.gradle.kts b/ibm-mq-metrics/build.gradle.kts new file mode 100644 index 000000000..31e191841 --- /dev/null +++ b/ibm-mq-metrics/build.gradle.kts @@ -0,0 +1,65 @@ +plugins { + application + id("com.gradleup.shadow") + id("otel.java-conventions") + id("otel.publish-conventions") +} + +description = "IBM-MQ metrics" +otelJava.moduleName.set("io.opentelemetry.contrib.ibm-mq-metrics") +application.mainClass.set("io.opentelemetry.ibm.mq.opentelemetry.Main") + +val ibmClientJar: Configuration by configurations.creating { + isCanBeResolved = true + isCanBeConsumed = false +} + +dependencies { + api("com.google.code.findbugs:jsr305:3.0.2") + api("io.swagger:swagger-annotations:1.6.16") + api("org.jetbrains:annotations:26.0.2-1") + api("com.ibm.mq:com.ibm.mq.allclient:9.4.3.1") + api("org.yaml:snakeyaml:2.5") + api("com.fasterxml.jackson.core:jackson-databind:2.20.0") + api("io.opentelemetry:opentelemetry-sdk") + api("io.opentelemetry:opentelemetry-exporter-otlp") + api("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure") + api("org.slf4j:slf4j-api:2.0.17") + implementation("org.slf4j:slf4j-simple:2.0.17") + testImplementation("com.google.guava:guava") + testImplementation("io.opentelemetry:opentelemetry-sdk-testing") + ibmClientJar("com.ibm.mq:com.ibm.mq.allclient:9.4.3.1") { + artifact { + name = "com.ibm.mq.allclient" + extension = "jar" + } + isTransitive = false + } +} + +testing { + suites { + val integrationTest by registering(JvmTestSuite::class) { + dependencies { + implementation("org.assertj:assertj-core:3.27.6") + implementation("io.opentelemetry:opentelemetry-sdk-testing") + implementation("com.ibm.mq:com.ibm.mq.jakarta.client:9.4.3.1") + implementation("jakarta.jms:jakarta.jms-api:3.1.0") + } + + targets { + all { + testTask.configure { + shouldRunAfter(tasks.test) + } + } + } + } + } +} + +tasks.shadowJar { + dependencies { + exclude(dependency("com.ibm.mq:com.ibm.mq.allclient")) + } +} diff --git 
a/ibm-mq-metrics/config.yml b/ibm-mq-metrics/config.yml new file mode 100644 index 000000000..603330f65 --- /dev/null +++ b/ibm-mq-metrics/config.yml @@ -0,0 +1,213 @@ +# This section defines the schedule at which the program will scrape metrics. +taskSchedule: + numberOfThreads: 20 + initialDelaySeconds: 0 + taskDelaySeconds: 60 + +#This is the timeout on queue metrics and channel metrics threads.Default value is 20 seconds. +#No need to change the default unless you know what you are doing. +#queueMetricsCollectionTimeoutInSeconds: 40 +#channelMetricsCollectionTimeoutInSeconds: 40 +#topicMetricsCollectionTimeoutInSeconds: 40 + +queueManagers: + - name: "QM1" + host: "localhost" + port: 1414 + + # Indicate the MaxActiveChannels as set in qm.ini, see https://www.ibm.com/docs/en/ibm-mq/9.3.x?topic=qmini-channels-stanza-file + maxActiveChannels: 4200 + + #The transport type for the queue manager connection, the default is "Bindings" for a binding type connection + #For bindings type, connection WMQ extension (i.e machine agent) need to be on the same machine on which WebbsphereMQ server is running + #For client type, connection change it to "Client". + transportType: "Bindings" + + #Channel name of the queue manager, channel should be server-conn type. + #This field is not required in case of transportType: Bindings + #channelName: "SYSTEM.ADMIN.SVRCONN" + + #for user access level, please check "Access Permissions" section on the extensions page + #comment out the username and password in case of transportType: Bindings. + #username: "" + #password: "" + + #PCF requests are always sent to SYSTEM.ADMIN.COMMAND.QUEUE. The PCF responses to these requests are sent to the default reply-to queue called + #SYSTEM.DEFAULT.MODEL.QUEUE. However, you can override this behavior and send it to a temporary dynamic queue by changing the modelQueueName and replyQueuePrefix fields. 
+ #For more details around this https://www.ibm.com/support/knowledgecenter/SSFKSJ_7.5.0/com.ibm.mq.ref.adm.doc/q083240_.htm & https://www.ibm.com/support/knowledgecenter/SSFKSJ_8.0.0/com.ibm.mq.adm.doc/q020010_.htm + #modelQueueName: "" + #replyQueuePrefix: "" + + # Name of the temporary dynamic queue holding the configuration events. This queue contains information regarding the configuration of the queue manager, notable MaxChannels and MaxActiveChannels. + # If unset, the default queue name `SYSTEM.ADMIN.CONFIG.EVENT` is applied. + # Configuration events need to be enabled explicitly in the queue manager configuration. See https://www.ibm.com/docs/en/ibm-mq/9.4.x?topic=monitoring-configuration-events for reference. + #configurationQueueName: "SYSTEM.ADMIN.CONFIG.EVENT" + + # Interval in milliseconds at which the configuration events in the configuration queue can be consumed. + # By default, no events are consumed. + #consumeConfigurationEventInterval: 600000 # 10 minutes + + # Enable running a queue manager refresh request to reload its configuration and create a configuration event. + # This action is only executed if no configuration events are found when reading the configuration queue.name: + # By default, this action is disabled. + #refreshQueueManagerConfigurationEnabled: false + + #Sets the CCSID used in the message descriptor of request and response messages. The default value is MQC.MQCCSI_Q_MGR. + #To set this, please use the integer value. + #ccsid: + + #Sets the encoding used in the message descriptor of request and response messages. The default value is MQC.MQENC_NATIVE. + #To set this, please use the integer value. + #encoding: + + # IBM Cipher Suite e.g. "SSL_RSA_WITH_AES_128_CBC_SHA256".. 
+ # For translation to IBM Cipher http://www.ibm.com/support/knowledgecenter/SSFKSJ_8.0.0/com.ibm.mq.dev.doc/q113210_.htm + # A cipher working for IBM Cloud MQ and Temurin JDK 8 is TLS_AES_128_GCM_SHA256 + #cipherSuite: "" + + queueFilters: + #Can provide complete queue name or generic names. A generic name is a character string followed by an asterisk (*), + #for example ABC*, and it selects all objects having names that start with the selected character string. + #An asterisk on its own matches all possible names. + include: ["*"] + exclude: + #type value: STARTSWITH, EQUALS, ENDSWITH, CONTAINS + - type: "STARTSWITH" + #The name of the queue or queue name pattern as per queue filter, comma separated values + values: ["SYSTEM","AMQ"] + + + channelFilters: + #Can provide complete channel name or generic names. A generic name is a character string followed by an asterisk (*), + #for example ABC*, and it selects all objects having names that start with the selected character string. + #An asterisk on its own matches all possible names. + include: ["*"] + exclude: + #type value: STARTSWITH, EQUALS, ENDSWITH, CONTAINS + - type: "STARTSWITH" + #The name of the queue or queue name pattern as per queue filter, comma separated values + values: ["SYSTEM"] + + listenerFilters: + #Can provide complete channel name or generic names. A generic name is a character string followed by an asterisk (*), + #for example ABC*, and it selects all objects having names that start with the selected character string. + #An asterisk on its own matches all possible names. 
+ include: ["*"] + exclude: + #type value: STARTSWITH, EQUALS, ENDSWITH, CONTAINS + - type: "STARTSWITH" + #The name of the queue or queue name pattern as per queue filter, comma separated values + values: ["SYSTEM"] + + topicFilters: + # For topics, IBM MQ uses the topic wildcard characters ('#' and '+') and does not treat a trailing asterisk as a wildcard + # https://www.ibm.com/support/knowledgecenter/en/SSFKSJ_7.5.0/com.ibm.mq.pla.doc/q005020_.htm + include: ["#"] + exclude: + #type value: STARTSWITH, EQUALS, ENDSWITH, CONTAINS + - type: "STARTSWITH" + #The name of the queue or queue name pattern as per queue filter, comma separated values + values: ["SYSTEM","$SYS"] + +metrics: + "ibm.mq.message.retry.count": # Number of message retries + enabled: true + "ibm.mq.status": # Channel status + enabled: true + "ibm.mq.max.sharing.conversations": # Maximum number of conversations permitted on this channel instance. + enabled: true + "ibm.mq.current.sharing.conversations": # Current number of conversations permitted on this channel instance. 
+ enabled: true + "ibm.mq.byte.received": # Number of bytes received + enabled: true + "ibm.mq.byte.sent": # Number of bytes sent + enabled: true + "ibm.mq.buffers.received": # Buffers received + enabled: true + "ibm.mq.buffers.sent": # Buffers sent + enabled: true + "ibm.mq.message.count": # Message count + enabled: true + "ibm.mq.open.input.count": # Count of applications sending messages to the queue + enabled: true + "ibm.mq.open.output.count": # Count of applications consuming messages from the queue + enabled: true + "ibm.mq.high.queue.depth": # The current high queue depth + enabled: true + "ibm.mq.service.interval": # The queue service interval + enabled: true + "ibm.mq.queue.depth.full.event": # The number of full queue events + enabled: true + "ibm.mq.queue.depth.high.event": # The number of high queue events + enabled: true + "ibm.mq.queue.depth.low.event": # The number of low queue events + enabled: true + "ibm.mq.uncommitted.messages": # Number of uncommitted messages + enabled: true + "ibm.mq.oldest.msg.age": # Queue message oldest age + enabled: true + "ibm.mq.current.max.queue.filesize": # Current maximum queue file size + enabled: true + "ibm.mq.current.queue.filesize": # Current queue file size + enabled: true + "ibm.mq.instances.per.client": # Instances per client + enabled: true + "ibm.mq.message.deq.count": # Message dequeue count + enabled: true + "ibm.mq.message.enq.count": # Message enqueue count + enabled: true + "ibm.mq.queue.depth": # Current queue depth + enabled: true + "ibm.mq.service.interval.event": # Queue service interval event + enabled: true + "ibm.mq.reusable.log.size": # The amount of space occupied, in megabytes, by log extents available to be reused. + enabled: true + "ibm.mq.manager.active.channels": # The queue manager active maximum channels limit + enabled: true + "ibm.mq.restart.log.size": # Size of the log data required for restart recovery in megabytes. 
+ enabled: true + "ibm.mq.max.queue.depth": # Maximum queue depth + enabled: true + "ibm.mq.onqtime.short_period": # Amount of time, in microseconds, that a message spent on the queue, over a short period + enabled: true + "ibm.mq.onqtime.long_period": # Amount of time, in microseconds, that a message spent on the queue, over a longer period + enabled: true + "ibm.mq.message.received.count": # Number of messages received + enabled: true + "ibm.mq.message.sent.count": # Number of messages sent + enabled: true + "ibm.mq.max.instances": # Max channel instances + enabled: true + "ibm.mq.connection.count": # Active connections count + enabled: true + "ibm.mq.manager.status": # Queue manager status + enabled: true + "ibm.mq.heartbeat": # Queue manager heartbeat + enabled: true + "ibm.mq.archive.log.size": # Queue manager archive log size + enabled: true + "ibm.mq.manager.max.active.channels": # Queue manager max active channels + enabled: true + "ibm.mq.manager.statistics.interval": # Queue manager statistics interval + enabled: true + "ibm.mq.publish.count": # Topic publication count + enabled: true + "ibm.mq.subscription.count": # Topic subscription count + enabled: true + "ibm.mq.listener.status": # Listener status + enabled: true + "ibm.mq.unauthorized.event": # Number of authentication error events + enabled: true + "ibm.mq.manager.max.handles": # Max open handles + enabled: true + +sslConnection: + trustStorePath: "" + trustStorePassword: "" + + keyStorePath: "" + keyStorePassword: "" + +# Configure the OTLP exporter using system properties keys following the specification https://opentelemetry.io/docs/languages/java/configuration/ +otlpExporter: + otel.exporter.otlp.endpoint: http://localhost:4318 diff --git a/ibm-mq-metrics/docs/metrics.md b/ibm-mq-metrics/docs/metrics.md new file mode 100644 index 000000000..2cfe9047c --- /dev/null +++ b/ibm-mq-metrics/docs/metrics.md @@ -0,0 +1,817 @@ +# Produced Metrics + + +## Metric `ibm.mq.message.retry.count` + +| Name | 
Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.message.retry.count` | Gauge | `{message}` | Number of message retries | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.message.retry.count` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.channel.name` | string | The name of the channel | `DEV.ADMIN.SVRCONN` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.channel.type` | string | The type of the channel | `server-connection`; `cluster-receiver`; `amqp` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + + + +## Metric `ibm.mq.status` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.status` | Gauge | `1` | Channel status | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.status` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.channel.name` | string | The name of the channel | `DEV.ADMIN.SVRCONN` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.channel.start.time` | int | The start time of the channel as seconds since Epoch. 
| `1748462702` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.channel.type` | string | The type of the channel | `server-connection`; `cluster-receiver`; `amqp` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.job.name` | string | The job name | `0000074900000003` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + + + +## Metric `ibm.mq.max.sharing.conversations` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.max.sharing.conversations` | Gauge | `{conversation}` | Maximum number of conversations permitted on this channel instance. | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.max.sharing.conversations` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.channel.name` | string | The name of the channel | `DEV.ADMIN.SVRCONN` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.channel.start.time` | int | The start time of the channel as seconds since Epoch. 
| `1748462702` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.channel.type` | string | The type of the channel | `server-connection`; `cluster-receiver`; `amqp` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.job.name` | string | The job name | `0000074900000003` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + + + +## Metric `ibm.mq.current.sharing.conversations` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.current.sharing.conversations` | Gauge | `{conversation}` | Current number of conversations permitted on this channel instance. | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.current.sharing.conversations` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.channel.name` | string | The name of the channel | `DEV.ADMIN.SVRCONN` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.channel.start.time` | int | The start time of the channel as seconds since Epoch. 
| `1748462702` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.channel.type` | string | The type of the channel | `server-connection`; `cluster-receiver`; `amqp` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.job.name` | string | The job name | `0000074900000003` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + + + +## Metric `ibm.mq.byte.received` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.byte.received` | Gauge | `By` | Number of bytes received | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.byte.received` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.channel.name` | string | The name of the channel | `DEV.ADMIN.SVRCONN` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.channel.start.time` | int | The start time of the channel as seconds since Epoch. 
| `1748462702` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.channel.type` | string | The type of the channel | `server-connection`; `cluster-receiver`; `amqp` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.job.name` | string | The job name | `0000074900000003` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + + + +## Metric `ibm.mq.byte.sent` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.byte.sent` | Gauge | `By` | Number of bytes sent | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.byte.sent` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.channel.name` | string | The name of the channel | `DEV.ADMIN.SVRCONN` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.channel.start.time` | int | The start time of the channel as seconds since Epoch. 
| `1748462702` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.channel.type` | string | The type of the channel | `server-connection`; `cluster-receiver`; `amqp` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.job.name` | string | The job name | `0000074900000003` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + + + +## Metric `ibm.mq.buffers.received` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.buffers.received` | Gauge | `{buffer}` | Buffers received | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.buffers.received` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.channel.name` | string | The name of the channel | `DEV.ADMIN.SVRCONN` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.channel.start.time` | int | The start time of the channel as seconds since Epoch. 
| `1748462702` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.channel.type` | string | The type of the channel | `server-connection`; `cluster-receiver`; `amqp` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.job.name` | string | The job name | `0000074900000003` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + + + +## Metric `ibm.mq.buffers.sent` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.buffers.sent` | Gauge | `{buffer}` | Buffers sent | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.buffers.sent` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.channel.name` | string | The name of the channel | `DEV.ADMIN.SVRCONN` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.channel.start.time` | int | The start time of the channel as seconds since Epoch. 
| `1748462702` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.channel.type` | string | The type of the channel | `server-connection`; `cluster-receiver`; `amqp` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.job.name` | string | The job name | `0000074900000003` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + + + +## Metric `ibm.mq.message.count` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.message.count` | Gauge | `{message}` | Message count | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.message.count` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.channel.name` | string | The name of the channel | `DEV.ADMIN.SVRCONN` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.channel.start.time` | int | The start time of the channel as seconds since Epoch. 
| `1748462702` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.channel.type` | string | The type of the channel | `server-connection`; `cluster-receiver`; `amqp` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.job.name` | string | The job name | `0000074900000003` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + + + +## Metric `ibm.mq.open.input.count` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.open.input.count` | Gauge | `{application}` | Count of applications sending messages to the queue | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.open.input.count` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.type` | string | The queue type | `local-normal` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `messaging.destination.name` | string | The system-specific name of the messaging operation. [1] | `dev/` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + +**[1] `messaging.destination.name`:** This is duplicated from otel semantic-conventions. <!-- NOTE(review): this brief is actually the semconv text for `messaging.operation.name`; `messaging.destination.name` is described in semconv as "The message destination name". The same wrong brief is repeated in every attribute table below — fix it once in the generating model and regenerate. -->
+ + + +## Metric `ibm.mq.open.output.count` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.open.output.count` | Gauge | `{application}` | Count of applications consuming messages from the queue | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.open.output.count` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.type` | string | The queue type | `local-normal` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `messaging.destination.name` | string | The system-specific name of the messaging operation. [2] | `dev/` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + +**[2] `messaging.destination.name`:** This is duplicated from otel semantic-conventions. 
+ + + +## Metric `ibm.mq.high.queue.depth` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.high.queue.depth` | Gauge | `{percent}` | The current high queue depth | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.high.queue.depth` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.type` | string | The queue type | `local-normal` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `messaging.destination.name` | string | The system-specific name of the messaging operation. [3] | `dev/` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + +**[3] `messaging.destination.name`:** This is duplicated from otel semantic-conventions. 
+ + + +## Metric `ibm.mq.service.interval` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.service.interval` | Gauge | `{percent}` | The queue service interval | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.service.interval` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.type` | string | The queue type | `local-normal` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `messaging.destination.name` | string | The system-specific name of the messaging operation. [4] | `dev/` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + +**[4] `messaging.destination.name`:** This is duplicated from otel semantic-conventions. 
+ + + +## Metric `ibm.mq.queue.depth.full.event` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.queue.depth.full.event` | Counter | `{event}` | The number of full queue events | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.queue.depth.full.event` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `messaging.destination.name` | string | The system-specific name of the messaging operation. [5] | `dev/` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + +**[5] `messaging.destination.name`:** This is duplicated from otel semantic-conventions. + + + +## Metric `ibm.mq.queue.depth.high.event` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.queue.depth.high.event` | Counter | `{event}` | The number of high queue events | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.queue.depth.high.event` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `messaging.destination.name` | string | The system-specific name of the messaging operation. 
[6] | `dev/` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + +**[6] `messaging.destination.name`:** This is duplicated from otel semantic-conventions. + + + +## Metric `ibm.mq.queue.depth.low.event` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.queue.depth.low.event` | Counter | `{event}` | The number of low queue events | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.queue.depth.low.event` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `messaging.destination.name` | string | The system-specific name of the messaging operation. [7] | `dev/` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + +**[7] `messaging.destination.name`:** This is duplicated from otel semantic-conventions. 
+ + + +## Metric `ibm.mq.uncommitted.messages` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.uncommitted.messages` | Gauge | `{message}` | Number of uncommitted messages | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.uncommitted.messages` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.type` | string | The queue type | `local-normal` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `messaging.destination.name` | string | The system-specific name of the messaging operation. [8] | `dev/` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + +**[8] `messaging.destination.name`:** This is duplicated from otel semantic-conventions. 
+ + +## Metric `ibm.mq.oldest.msg.age` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.oldest.msg.age` | Gauge | `microseconds` | Queue message oldest age <!-- NOTE(review): `microseconds` is not a UCUM code — the UCUM unit is `us`; the `ibm.mq.onqtime.*` metrics have the same issue. Confirm against the instrument definitions before regenerating. --> | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.oldest.msg.age` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.type` | string | The queue type | `local-normal` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `messaging.destination.name` | string | The system-specific name of the messaging operation. [9] | `dev/` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + +**[9] `messaging.destination.name`:** This is duplicated from otel semantic-conventions. 
+ + + +## Metric `ibm.mq.current.max.queue.filesize` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.current.max.queue.filesize` | Gauge | `By` | Current maximum queue file size | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.current.max.queue.filesize` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.type` | string | The queue type | `local-normal` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `messaging.destination.name` | string | The system-specific name of the messaging operation. [10] | `dev/` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + +**[10] `messaging.destination.name`:** This is duplicated from otel semantic-conventions. 
+ + + +## Metric `ibm.mq.current.queue.filesize` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.current.queue.filesize` | Gauge | `By` | Current queue file size | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.current.queue.filesize` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.type` | string | The queue type | `local-normal` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `messaging.destination.name` | string | The system-specific name of the messaging operation. [11] | `dev/` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + +**[11] `messaging.destination.name`:** This is duplicated from otel semantic-conventions. 
+ + + +## Metric `ibm.mq.instances.per.client` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.instances.per.client` | Gauge | `{instance}` | Instances per client | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.instances.per.client` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.type` | string | The queue type | `local-normal` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `messaging.destination.name` | string | The system-specific name of the messaging operation. [12] | `dev/` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + +**[12] `messaging.destination.name`:** This is duplicated from otel semantic-conventions. 
+ + + +## Metric `ibm.mq.message.deq.count` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.message.deq.count` | Gauge | `{message}` | Message dequeue count | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.message.deq.count` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.type` | string | The queue type | `local-normal` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `messaging.destination.name` | string | The system-specific name of the messaging operation. [13] | `dev/` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + +**[13] `messaging.destination.name`:** This is duplicated from otel semantic-conventions. 
+ + + +## Metric `ibm.mq.message.enq.count` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.message.enq.count` | Gauge | `{message}` | Message enqueue count | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.message.enq.count` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.type` | string | The queue type | `local-normal` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `messaging.destination.name` | string | The system-specific name of the messaging operation. [14] | `dev/` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + +**[14] `messaging.destination.name`:** This is duplicated from otel semantic-conventions. 
+ + + +## Metric `ibm.mq.queue.depth` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.queue.depth` | Gauge | `{message}` | Current queue depth | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.queue.depth` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.type` | string | The queue type | `local-normal` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `messaging.destination.name` | string | The system-specific name of the messaging operation. [15] | `dev/` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + +**[15] `messaging.destination.name`:** This is duplicated from otel semantic-conventions. 
+ + + +## Metric `ibm.mq.service.interval.event` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.service.interval.event` | Gauge | `1` | Queue service interval event | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.service.interval.event` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.type` | string | The queue type | `local-normal` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `messaging.destination.name` | string | The system-specific name of the messaging operation. [16] | `dev/` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + +**[16] `messaging.destination.name`:** This is duplicated from otel semantic-conventions. + + + +## Metric `ibm.mq.reusable.log.size` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.reusable.log.size` | Gauge | `By` | The amount of space occupied, in megabytes, by log extents available to be reused. 
| ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.reusable.log.size` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + + + +## Metric `ibm.mq.manager.active.channels` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.manager.active.channels` | Gauge | `{channel}` | The queue manager active maximum channels limit | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.manager.active.channels` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + + + +## Metric `ibm.mq.restart.log.size` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.restart.log.size` | Gauge | `By` | Size of the log data required for restart recovery in megabytes. <!-- NOTE(review): unit `By` (bytes) contradicts "in megabytes" here; `ibm.mq.reusable.log.size` has the same mismatch. IBM MQ reports these log sizes in megabytes — confirm whether the recorder converts to bytes, otherwise the unit should be `MBy`. -->
| ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.restart.log.size` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + + + +## Metric `ibm.mq.max.queue.depth` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.max.queue.depth` | Gauge | `{message}` | Maximum queue depth | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.max.queue.depth` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.type` | string | The queue type | `local-normal` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `messaging.destination.name` | string | The system-specific name of the messaging operation. [17] | `dev/` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + +**[17] `messaging.destination.name`:** This is duplicated from otel semantic-conventions. 
+ + + +## Metric `ibm.mq.onqtime.short_period` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.onqtime.short_period` | Gauge | `microseconds` | Amount of time, in microseconds, that a message spent on the queue, over a short period | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.onqtime.short_period` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.type` | string | The queue type | `local-normal` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `messaging.destination.name` | string | The system-specific name of the messaging operation. [18] | `dev/` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + +**[18] `messaging.destination.name`:** This is duplicated from otel semantic-conventions. 
+ + + +## Metric `ibm.mq.onqtime.long_period` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.onqtime.long_period` | Gauge | `microseconds` | Amount of time, in microseconds, that a message spent on the queue, over a longer period | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.onqtime.long_period` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.type` | string | The queue type | `local-normal` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `messaging.destination.name` | string | The system-specific name of the messaging operation. [19] | `dev/` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + +**[19] `messaging.destination.name`:** This is duplicated from otel semantic-conventions. 
+ + + +## Metric `ibm.mq.message.received.count` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.message.received.count` | Gauge | `{message}` | Number of messages received | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.message.received.count` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.channel.name` | string | The name of the channel | `DEV.ADMIN.SVRCONN` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.channel.type` | string | The type of the channel | `server-connection`; `cluster-receiver`; `amqp` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + + + +## Metric `ibm.mq.message.sent.count` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.message.sent.count` | Gauge | `{message}` | Number of messages sent | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.message.sent.count` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.channel.name` | string | The name of the channel | `DEV.ADMIN.SVRCONN` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.channel.type` | string | The type of the channel | `server-connection`; `cluster-receiver`; `amqp` | `Required` | 
![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + + + +## Metric `ibm.mq.max.instances` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.max.instances` | Gauge | `{instance}` | Max channel instances | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.max.instances` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.channel.name` | string | The name of the channel | `DEV.ADMIN.SVRCONN` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.channel.type` | string | The type of the channel | `server-connection`; `cluster-receiver`; `amqp` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + + + +## Metric `ibm.mq.connection.count` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.connection.count` | Gauge | `{connection}` | Active connections count | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.connection.count` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | 
![Development](https://img.shields.io/badge/-development-blue) | + + + +## Metric `ibm.mq.manager.status` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.manager.status` | Gauge | `1` | Queue manager status | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.manager.status` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + + + +## Metric `ibm.mq.heartbeat` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.heartbeat` | Gauge | `1` | Queue manager heartbeat | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.heartbeat` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + + + +## Metric `ibm.mq.archive.log.size` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.archive.log.size` | Gauge | `By` | Queue manager archive log size | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.archive.log.size` Attributes + +| Attribute | Type | Description | Examples | [Requirement 
Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + + + +## Metric `ibm.mq.manager.max.active.channels` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.manager.max.active.channels` | Gauge | `{channel}` | Queue manager max active channels | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.manager.max.active.channels` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + + + +## Metric `ibm.mq.manager.statistics.interval` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.manager.statistics.interval` | Gauge | `1` | Queue manager statistics interval | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.manager.statistics.interval` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + + + +## Metric `ibm.mq.publish.count` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | 
--------- | +| `ibm.mq.publish.count` | Gauge | `{publication}` | Topic publication count | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.publish.count` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `messaging.destination.name` | string | The system-specific name of the messaging operation. [20] | `dev/` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + +**[20] `messaging.destination.name`:** This is duplicated from otel semantic-conventions. + + + +## Metric `ibm.mq.subscription.count` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.subscription.count` | Gauge | `{subscription}` | Topic subscription count | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.subscription.count` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `messaging.destination.name` | string | The system-specific name of the messaging operation. [21] | `dev/` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + +**[21] `messaging.destination.name`:** This is duplicated from otel semantic-conventions. 
+ + + +## Metric `ibm.mq.listener.status` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.listener.status` | Gauge | `1` | Listener status | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.listener.status` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.listener.name` | string | The listener name | `listener` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + + + +## Metric `ibm.mq.unauthorized.event` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.unauthorized.event` | Counter | `{event}` | Number of authentication error events | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.unauthorized.event` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `service.name` | string | Logical name of the service. [22] | `Wordle`; `JMSService` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `user.name` | string | Short name or login/username of the user. 
[23] | `foo`; `root` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + +**[22] `service.name`:** This is duplicated from otel semantic-conventions. + +**[23] `user.name`:** This is duplicated from otel semantic-conventions. + + + +## Metric `ibm.mq.manager.max.handles` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.manager.max.handles` | Gauge | `{event}` | Max open handles | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.manager.max.handles` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | + + + +## Metric `ibm.mq.connection.errors` + +| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `ibm.mq.connection.errors` | Counter | `{errors}` | Number of connection errors | ![Development](https://img.shields.io/badge/-development-blue) | + + +### `ibm.mq.connection.errors` Attributes + +| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +| `error.code` | string | The reason code associated with an error | `2038`; `2543`; `2009` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | +| `ibm.mq.queue.manager` | string | The name of the IBM queue manager | `MQ1` | `Required` | ![Development](https://img.shields.io/badge/-development-blue) | diff --git a/ibm-mq-metrics/model/attributes.yaml b/ibm-mq-metrics/model/attributes.yaml new file mode 
100644 index 000000000..4e59f631b --- /dev/null +++ b/ibm-mq-metrics/model/attributes.yaml @@ -0,0 +1,81 @@ +groups: + - id: shared.attributes + type: attribute_group + brief: Attributes of metrics. + attributes: + - id: ibm.mq.queue.manager + type: string + brief: > + The name of the IBM queue manager + stability: development + examples: ["MQ1"] + - id: messaging.destination.name + type: string + brief: > + The message destination name. + note: This is duplicated from otel semantic-conventions. + stability: development + examples: [ "dev/" ] + - id: ibm.mq.channel.name + type: string + brief: > + The name of the channel + stability: development + examples: [ "DEV.ADMIN.SVRCONN" ] + - id: ibm.mq.channel.type + type: string + brief: > + The type of the channel + stability: development + examples: [ "server-connection", "cluster-receiver", "amqp" ] + - id: ibm.mq.job.name + type: string + brief: > + The job name + stability: development + examples: [ "0000074900000003" ] + - id: ibm.mq.channel.start.time + type: int + brief: > + The start time of the channel as seconds since Epoch. + stability: development + examples: [ 1748462702 ] + # Use the messaging.destination.name attribute instead? +# - id: queue.name +# type: string +# brief: > +# The queue name +# stability: development +# examples: [ "DEV.DEAD.LETTER.QUEUE" ] + - id: ibm.mq.queue.type + type: string + brief: > + The queue type + stability: development + examples: [ "local-normal" ] + - id: ibm.mq.listener.name + type: string + brief: > + The listener name + stability: development + examples: [ "listener" ] + - id: user.name + type: string + brief: > + Short name or login/username of the user. + note: This is duplicated from otel semantic-conventions. + stability: development + examples: [ "foo", "root" ] + - id: service.name + type: string + brief: > + Logical name of the service. + note: This is duplicated from otel semantic-conventions. 
+ stability: development + examples: [ "Wordle", "JMSService" ] + - id: error.code + type: string + brief: > + The reason code associated with an error + stability: development + examples: [ "2038", "2543", "2009" ] diff --git a/ibm-mq-metrics/model/metrics.yaml b/ibm-mq-metrics/model/metrics.yaml new file mode 100644 index 000000000..9e4e72fee --- /dev/null +++ b/ibm-mq-metrics/model/metrics.yaml @@ -0,0 +1,623 @@ +groups: + - id: ibm.mq.message.retry.count + type: metric + metric_name: ibm.mq.message.retry.count + stability: development + brief: "Number of message retries" + instrument: gauge + unit: "{message}" + attributes: + - ref: ibm.mq.channel.name + requirement_level: required + - ref: ibm.mq.channel.type + requirement_level: required + - ref: ibm.mq.queue.manager + requirement_level: required + - id: ibm.mq.status + type: metric + metric_name: ibm.mq.status + stability: development + brief: "Channel status" + instrument: gauge + unit: "1" + attributes: + - ref: ibm.mq.channel.name + requirement_level: required + - ref: ibm.mq.channel.type + requirement_level: required + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: ibm.mq.job.name + requirement_level: required + - ref: ibm.mq.channel.start.time + requirement_level: required + - id: ibm.mq.max.sharing.conversations + type: metric + metric_name: ibm.mq.max.sharing.conversations + stability: development + brief: "Maximum number of conversations permitted on this channel instance." 
+ instrument: gauge + unit: "{conversation}" + attributes: + - ref: ibm.mq.channel.name + requirement_level: required + - ref: ibm.mq.channel.type + requirement_level: required + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: ibm.mq.job.name + requirement_level: required + - ref: ibm.mq.channel.start.time + requirement_level: required + - id: ibm.mq.current.sharing.conversations + type: metric + metric_name: ibm.mq.current.sharing.conversations + stability: development + unit: "{conversation}" + brief: "Current number of conversations permitted on this channel instance." + instrument: gauge + attributes: + - ref: ibm.mq.channel.name + requirement_level: required + - ref: ibm.mq.channel.type + requirement_level: required + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: ibm.mq.job.name + requirement_level: required + - ref: ibm.mq.channel.start.time + requirement_level: required + - id: ibm.mq.byte.received + type: metric + metric_name: ibm.mq.byte.received + stability: development + brief: "Number of bytes received" + instrument: gauge + unit: "By" + attributes: + - ref: ibm.mq.channel.name + requirement_level: required + - ref: ibm.mq.channel.type + requirement_level: required + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: ibm.mq.job.name + requirement_level: required + - ref: ibm.mq.channel.start.time + requirement_level: required + - id: ibm.mq.byte.sent + type: metric + metric_name: ibm.mq.byte.sent + stability: development + brief: "Number of bytes sent" + instrument: gauge + unit: "By" + attributes: + - ref: ibm.mq.channel.name + requirement_level: required + - ref: ibm.mq.channel.type + requirement_level: required + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: ibm.mq.job.name + requirement_level: required + - ref: ibm.mq.channel.start.time + requirement_level: required + - id: ibm.mq.buffers.received + type: metric + metric_name: ibm.mq.buffers.received + stability: 
development + brief: "Buffers received" + instrument: gauge + unit: "{buffer}" + attributes: + - ref: ibm.mq.channel.name + requirement_level: required + - ref: ibm.mq.channel.type + requirement_level: required + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: ibm.mq.job.name + requirement_level: required + - ref: ibm.mq.channel.start.time + requirement_level: required + - id: ibm.mq.buffers.sent + type: metric + metric_name: ibm.mq.buffers.sent + stability: development + brief: "Buffers sent" + unit: "{buffer}" + instrument: gauge + attributes: + - ref: ibm.mq.channel.name + requirement_level: required + - ref: ibm.mq.channel.type + requirement_level: required + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: ibm.mq.job.name + requirement_level: required + - ref: ibm.mq.channel.start.time + requirement_level: required + - id: ibm.mq.message.count + type: metric + metric_name: ibm.mq.message.count + stability: development + brief: "Message count" + unit: "{message}" + instrument: gauge + attributes: + - ref: ibm.mq.channel.name + requirement_level: required + - ref: ibm.mq.channel.type + requirement_level: required + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: ibm.mq.job.name + requirement_level: required + - ref: ibm.mq.channel.start.time + requirement_level: required + - id: ibm.mq.open.input.count + type: metric + metric_name: ibm.mq.open.input.count + stability: development + brief: "Count of applications sending messages to the queue" + instrument: gauge + unit: "{application}" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: messaging.destination.name + requirement_level: required + - ref: ibm.mq.queue.type + requirement_level: required + - id: ibm.mq.open.output.count + type: metric + metric_name: ibm.mq.open.output.count + stability: development + brief: "Count of applications consuming messages from the queue" + instrument: gauge + unit: "{application}" + 
attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: messaging.destination.name + requirement_level: required + - ref: ibm.mq.queue.type + requirement_level: required + - id: ibm.mq.high.queue.depth + type: metric + metric_name: ibm.mq.high.queue.depth + stability: development + brief: "The current high queue depth" + instrument: gauge + unit: "{percent}" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: messaging.destination.name + requirement_level: required + - ref: ibm.mq.queue.type + requirement_level: required + - id: ibm.mq.service.interval + type: metric + metric_name: ibm.mq.service.interval + stability: development + brief: "The queue service interval" + instrument: gauge + unit: "{percent}" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: messaging.destination.name + requirement_level: required + - ref: ibm.mq.queue.type + requirement_level: required + - id: ibm.mq.queue.depth.full.event + type: metric + metric_name: ibm.mq.queue.depth.full.event + stability: development + brief: "The number of full queue events" + instrument: counter + unit: "{event}" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: messaging.destination.name + requirement_level: required + - id: ibm.mq.queue.depth.high.event + type: metric + metric_name: ibm.mq.queue.depth.high.event + stability: development + brief: "The number of high queue events" + instrument: counter + unit: "{event}" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: messaging.destination.name + requirement_level: required + - id: ibm.mq.queue.depth.low.event + type: metric + metric_name: ibm.mq.queue.depth.low.event + stability: development + brief: "The number of low queue events" + instrument: counter + unit: "{event}" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: messaging.destination.name + requirement_level: 
required + - id: ibm.mq.uncommitted.messages + type: metric + metric_name: ibm.mq.uncommitted.messages + stability: development + brief: "Number of uncommitted messages" + instrument: gauge + unit: "{message}" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: messaging.destination.name + requirement_level: required + - ref: ibm.mq.queue.type + requirement_level: required + - id: ibm.mq.oldest.msg.age + type: metric + metric_name: ibm.mq.oldest.msg.age + stability: development + brief: "Queue message oldest age" + instrument: gauge + unit: "microseconds" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: messaging.destination.name + requirement_level: required + - ref: ibm.mq.queue.type + requirement_level: required + - id: ibm.mq.current.max.queue.filesize + type: metric + metric_name: ibm.mq.current.max.queue.filesize + stability: development + brief: "Current maximum queue file size" + instrument: gauge + unit: "By" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: messaging.destination.name + requirement_level: required + - ref: ibm.mq.queue.type + requirement_level: required + - id: ibm.mq.current.queue.filesize + type: metric + metric_name: ibm.mq.current.queue.filesize + stability: development + brief: "Current queue file size" + instrument: gauge + unit: "By" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: messaging.destination.name + requirement_level: required + - ref: ibm.mq.queue.type + requirement_level: required + - id: ibm.mq.instances.per.client + type: metric + metric_name: ibm.mq.instances.per.client + stability: development + brief: "Instances per client" + instrument: gauge + unit: "{instance}" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: messaging.destination.name + requirement_level: required + - ref: ibm.mq.queue.type + requirement_level: required + - id: 
ibm.mq.message.deq.count + type: metric + metric_name: ibm.mq.message.deq.count + stability: development + brief: "Message dequeue count" + instrument: gauge + unit: "{message}" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: messaging.destination.name + requirement_level: required + - ref: ibm.mq.queue.type + requirement_level: required + - id: ibm.mq.message.enq.count + type: metric + metric_name: ibm.mq.message.enq.count + stability: development + brief: "Message enqueue count" + instrument: gauge + unit: "{message}" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: messaging.destination.name + requirement_level: required + - ref: ibm.mq.queue.type + requirement_level: required + - id: ibm.mq.queue.depth + type: metric + metric_name: ibm.mq.queue.depth + stability: development + brief: "Current queue depth" + instrument: gauge + unit: "{message}" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: messaging.destination.name + requirement_level: required + - ref: ibm.mq.queue.type + requirement_level: required + - id: ibm.mq.service.interval.event + type: metric + metric_name: ibm.mq.service.interval.event + stability: development + brief: "Queue service interval event" + instrument: gauge + unit: "1" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: messaging.destination.name + requirement_level: required + - ref: ibm.mq.queue.type + requirement_level: required + - id: ibm.mq.reusable.log.size + type: metric + metric_name: ibm.mq.reusable.log.size + stability: development + brief: "The amount of space occupied, in megabytes, by log extents available to be reused." 
+ instrument: gauge + unit: "By" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - id: ibm.mq.manager.active.channels + type: metric + metric_name: ibm.mq.manager.active.channels + stability: development + brief: "The queue manager active maximum channels limit" + instrument: gauge + unit: "{channel}" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - id: ibm.mq.restart.log.size + type: metric + metric_name: ibm.mq.restart.log.size + stability: development + brief: "Size of the log data required for restart recovery in megabytes." + instrument: gauge + unit: "By" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - id: ibm.mq.max.queue.depth + type: metric + metric_name: ibm.mq.max.queue.depth + stability: development + brief: "Maximum queue depth" + instrument: gauge + unit: "{message}" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: messaging.destination.name + requirement_level: required + - ref: ibm.mq.queue.type + requirement_level: required + - id: ibm.mq.onqtime.short_period + type: metric + metric_name: ibm.mq.onqtime.short_period + stability: development + brief: "Amount of time, in microseconds, that a message spent on the queue, over a short period" + instrument: gauge + unit: "microseconds" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: messaging.destination.name + requirement_level: required + - ref: ibm.mq.queue.type + requirement_level: required + - id: ibm.mq.onqtime.long_period + type: metric + metric_name: ibm.mq.onqtime.long_period + stability: development + brief: "Amount of time, in microseconds, that a message spent on the queue, over a longer period" + instrument: gauge + unit: "microseconds" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: messaging.destination.name + requirement_level: required + - ref: ibm.mq.queue.type + requirement_level: required + - id: 
ibm.mq.message.received.count + type: metric + metric_name: ibm.mq.message.received.count + stability: development + brief: "Number of messages received" + instrument: gauge + unit: "{message}" + attributes: + - ref: ibm.mq.channel.name + requirement_level: required + - ref: ibm.mq.channel.type + requirement_level: required + - ref: ibm.mq.queue.manager + requirement_level: required + - id: ibm.mq.message.sent.count + type: metric + metric_name: ibm.mq.message.sent.count + stability: development + brief: "Number of messages sent" + instrument: gauge + unit: "{message}" + attributes: + - ref: ibm.mq.channel.name + requirement_level: required + - ref: ibm.mq.channel.type + requirement_level: required + - ref: ibm.mq.queue.manager + requirement_level: required + - id: ibm.mq.max.instances + type: metric + metric_name: ibm.mq.max.instances + stability: development + brief: "Max channel instances" + instrument: gauge + unit: "{instance}" + attributes: + - ref: ibm.mq.channel.name + requirement_level: required + - ref: ibm.mq.channel.type + requirement_level: required + - ref: ibm.mq.queue.manager + requirement_level: required + - id: ibm.mq.connection.count + type: metric + metric_name: ibm.mq.connection.count + stability: development + brief: "Active connections count" + instrument: gauge + unit: "{connection}" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - id: ibm.mq.manager.status + type: metric + metric_name: ibm.mq.manager.status + stability: development + brief: "Queue manager status" + instrument: gauge + unit: "1" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - id: ibm.mq.heartbeat + type: metric + metric_name: ibm.mq.heartbeat + stability: development + brief: "Queue manager heartbeat" + instrument: gauge + unit: "1" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - id: ibm.mq.archive.log.size + type: metric + metric_name: ibm.mq.archive.log.size + stability: development + 
brief: "Queue manager archive log size" + instrument: gauge + unit: "By" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - id: ibm.mq.manager.max.active.channels + type: metric + metric_name: ibm.mq.manager.max.active.channels + stability: development + brief: "Queue manager max active channels" + instrument: gauge + unit: "{channel}" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - id: ibm.mq.manager.statistics.interval + type: metric + metric_name: ibm.mq.manager.statistics.interval + stability: development + brief: "Queue manager statistics interval" + instrument: gauge + unit: "1" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - id: ibm.mq.publish.count + type: metric + metric_name: ibm.mq.publish.count + stability: development + brief: "Topic publication count" + instrument: gauge + unit: "{publication}" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: messaging.destination.name + requirement_level: required + - id: ibm.mq.subscription.count + type: metric + metric_name: ibm.mq.subscription.count + stability: development + brief: "Topic subscription count" + instrument: gauge + unit: "{subscription}" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: messaging.destination.name + requirement_level: required + - id: ibm.mq.listener.status + type: metric + metric_name: ibm.mq.listener.status + stability: development + brief: "Listener status" + instrument: gauge + unit: "1" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: ibm.mq.listener.name + requirement_level: required + - id: ibm.mq.unauthorized.event + type: metric + metric_name: ibm.mq.unauthorized.event + stability: development + brief: "Number of authentication error events" + instrument: counter + unit: "{event}" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: user.name + 
requirement_level: required + - ref: service.name + requirement_level: required + - id: ibm.mq.manager.max.handles + type: metric + metric_name: ibm.mq.manager.max.handles + stability: development + brief: "Max open handles" + instrument: gauge + unit: "{event}" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - id: ibm.mq.connection.errors + type: metric + metric_name: ibm.mq.connection.errors + stability: development + brief: "Number of connection errors" + instrument: counter + unit: "{errors}" + attributes: + - ref: ibm.mq.queue.manager + requirement_level: required + - ref: error.code + requirement_level: required diff --git a/ibm-mq-metrics/model/registry_manifest.yaml b/ibm-mq-metrics/model/registry_manifest.yaml new file mode 100644 index 000000000..1fb92e0cb --- /dev/null +++ b/ibm-mq-metrics/model/registry_manifest.yaml @@ -0,0 +1,3 @@ +name: IBM MQ Monitoring +semconv_version: v1.34.0 +schema_base_url: https://www.github.com/open-telemetry/opentelemetry-java-contrib/ibm-mq-metrics diff --git a/ibm-mq-metrics/src/integrationTest/java/io/opentelemetry/ibm/mq/integration/tests/JakartaPutGet.java b/ibm-mq-metrics/src/integrationTest/java/io/opentelemetry/ibm/mq/integration/tests/JakartaPutGet.java new file mode 100644 index 000000000..ef40efa44 --- /dev/null +++ b/ibm-mq-metrics/src/integrationTest/java/io/opentelemetry/ibm/mq/integration/tests/JakartaPutGet.java @@ -0,0 +1,293 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.integration.tests; + +import com.ibm.mq.MQException; +import com.ibm.mq.MQQueueManager; +import com.ibm.mq.constants.CMQC; +import com.ibm.mq.constants.CMQCFC; +import com.ibm.mq.headers.pcf.PCFException; +import com.ibm.mq.headers.pcf.PCFMessage; +import com.ibm.mq.headers.pcf.PCFMessageAgent; +import com.ibm.msg.client.jakarta.jms.JmsConnectionFactory; +import com.ibm.msg.client.jakarta.jms.JmsFactoryFactory; +import 
com.ibm.msg.client.jakarta.wmq.WMQConstants; +import io.opentelemetry.ibm.mq.config.QueueManager; +import io.opentelemetry.ibm.mq.util.WmqUtil; +import jakarta.jms.Destination; +import jakarta.jms.JMSConsumer; +import jakarta.jms.JMSContext; +import jakarta.jms.JMSException; +import jakarta.jms.JMSProducer; +import jakarta.jms.JMSRuntimeException; +import jakarta.jms.TextMessage; + +/** + * This code was adapted from https://github.com/ibm-messaging/mq-dev-samples/. + * + *

A minimal and simple application for Point-to-point messaging. + * + *

Application makes use of fixed literals, any customisations will require re-compilation of + * this source file. Application assumes that the named queue is empty prior to a run. + * + *

Notes: + * + *

API type: Jakarta API (JMS v3.0, simplified domain) + * + *

Messaging domain: Point-to-point + * + *

Provider type: IBM MQ + * + *

Connection mode: Client connection + * + *

JNDI in use: No + */ +public final class JakartaPutGet { + + private JakartaPutGet() {} + + public static void createQueue(QueueManager manager, String name, int maxDepth) { + MQQueueManager ibmQueueManager = WmqUtil.connectToQueueManager(manager); + PCFMessageAgent agent = WmqUtil.initPcfMessageAgent(manager, ibmQueueManager); + PCFMessage request = new PCFMessage(CMQCFC.MQCMD_CREATE_Q); + request.addParameter(com.ibm.mq.constants.CMQC.MQCA_Q_NAME, name); + request.addParameter(CMQC.MQIA_Q_TYPE, CMQC.MQQT_LOCAL); + + request.addParameter(CMQC.MQIA_MAX_Q_DEPTH, maxDepth); + // these parameters are indicated in percentage of max depth. + request.addParameter(CMQC.MQIA_Q_DEPTH_HIGH_LIMIT, 75); + request.addParameter(CMQC.MQIA_Q_DEPTH_LOW_LIMIT, 20); + request.addParameter(CMQC.MQIA_Q_DEPTH_HIGH_EVENT, CMQCFC.MQEVR_ENABLED); + request.addParameter(CMQC.MQIA_Q_DEPTH_LOW_EVENT, CMQCFC.MQEVR_ENABLED); + request.addParameter(CMQC.MQIA_Q_DEPTH_MAX_EVENT, CMQCFC.MQEVR_ENABLED); + try { + agent.send(request); + } catch (PCFException e) { + if (e.reasonCode == CMQCFC.MQRCCF_OBJECT_ALREADY_EXISTS) { + return; + } + throw new RuntimeException(e); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + /** + * @param manager Queue manager configuration + * @param queueName Queue that the application uses to put and get messages to and from + * @param numberOfMessages Number of messages to send + * @param sleepIntervalMs Sleep interval in ms + */ + public static void runPutGet( + QueueManager manager, String queueName, int numberOfMessages, int sleepIntervalMs) { + + createQueue(manager, queueName, 100000); + JMSContext context = null; + JMSContext senderContext = null; + try { + // Create a connection factory + JmsFactoryFactory ff = JmsFactoryFactory.getInstance(WMQConstants.JAKARTA_WMQ_PROVIDER); + JmsConnectionFactory cf = ff.createConnectionFactory(); + + // Set the properties + cf.setStringProperty(WMQConstants.WMQ_HOST_NAME, manager.getHost()); + 
cf.setIntProperty(WMQConstants.WMQ_PORT, manager.getPort()); + cf.setStringProperty(WMQConstants.WMQ_CHANNEL, manager.getChannelName()); + cf.setIntProperty(WMQConstants.WMQ_CONNECTION_MODE, WMQConstants.WMQ_CM_CLIENT); + cf.setStringProperty(WMQConstants.WMQ_QUEUE_MANAGER, manager.getName()); + cf.setStringProperty(WMQConstants.WMQ_APPLICATIONNAME, "JakartaPutGet (Jakarta)"); + cf.setBooleanProperty(WMQConstants.USER_AUTHENTICATION_MQCSP, true); + cf.setStringProperty(WMQConstants.USERID, manager.getUsername()); + cf.setStringProperty(WMQConstants.PASSWORD, manager.getPassword()); + // cf.setStringProperty(WMQConstants.WMQ_SSL_CIPHER_SUITE, "*TLS12ORHIGHER"); + // cf.setIntProperty(MQConstants.CERTIFICATE_VALIDATION_POLICY, + // MQConstants.MQ_CERT_VAL_POLICY_NONE); + + // Create Jakarta objects + context = cf.createContext(); + Destination destination = context.createQueue("queue:///" + queueName); + + JMSConsumer consumer = context.createConsumer(destination); + consumer.setMessageListener(message -> {}); + + senderContext = cf.createContext(); + Destination senderDestination = senderContext.createQueue("queue:///" + queueName); + + for (int i = 0; i < numberOfMessages; i++) { + long uniqueNumber = System.currentTimeMillis() % 1000; + TextMessage message = + senderContext.createTextMessage("Your lucky number today is " + uniqueNumber); + message.setIntProperty(WMQConstants.JMS_IBM_CHARACTER_SET, 37); + JMSProducer producer = senderContext.createProducer(); + producer.send(senderDestination, message); + + Thread.sleep(sleepIntervalMs); + } + + } catch (JMSException | InterruptedException jmsex) { + throw new RuntimeException(jmsex); + } finally { + if (context != null) { + context.close(); + } + if (senderContext != null) { + senderContext.close(); + } + } + } + + /** + * Send a number of messages to the queue. 
+ * + * @param manager Queue manager configuration + * @param queueName Queue that the application uses to put and get messages to and from + * @param numberOfMessages Number of messages to send + */ + public static void sendMessages(QueueManager manager, String queueName, int numberOfMessages) { + + createQueue(manager, queueName, 1000); + JMSContext context = null; + try { + // Create a connection factory + JmsFactoryFactory ff = JmsFactoryFactory.getInstance(WMQConstants.JAKARTA_WMQ_PROVIDER); + JmsConnectionFactory cf = ff.createConnectionFactory(); + + // Set the properties + cf.setStringProperty(WMQConstants.WMQ_HOST_NAME, manager.getHost()); + cf.setIntProperty(WMQConstants.WMQ_PORT, manager.getPort()); + cf.setStringProperty(WMQConstants.WMQ_CHANNEL, manager.getChannelName()); + cf.setIntProperty(WMQConstants.WMQ_CONNECTION_MODE, WMQConstants.WMQ_CM_CLIENT); + cf.setStringProperty(WMQConstants.WMQ_QUEUE_MANAGER, manager.getName()); + cf.setStringProperty(WMQConstants.WMQ_APPLICATIONNAME, "Message Sender"); + cf.setBooleanProperty(WMQConstants.USER_AUTHENTICATION_MQCSP, true); + cf.setStringProperty(WMQConstants.USERID, manager.getUsername()); + cf.setStringProperty(WMQConstants.PASSWORD, manager.getPassword()); + // cf.setStringProperty(WMQConstants.WMQ_SSL_CIPHER_SUITE, "*TLS12ORHIGHER"); + // cf.setIntProperty(MQConstants.CERTIFICATE_VALIDATION_POLICY, + // MQConstants.MQ_CERT_VAL_POLICY_NONE); + + // Create Jakarta objects + context = cf.createContext(); + Destination destination = context.createQueue("queue:///" + queueName); + + for (int i = 0; i < numberOfMessages; i++) { + long uniqueNumber = System.currentTimeMillis() % 1000; + TextMessage message = + context.createTextMessage("Your lucky number today is " + uniqueNumber); + message.setIntProperty(WMQConstants.JMS_IBM_CHARACTER_SET, 37); + JMSProducer producer = context.createProducer(); + producer.send(destination, message); + } + + } catch (JMSException e) { + throw new RuntimeException(e); + } 
catch (JMSRuntimeException e) { + if (e.getCause() instanceof MQException) { + MQException mqe = (MQException) e.getCause(); + if (mqe.getReason() == 2053) { // queue is full + return; + } + } + throw new RuntimeException(e); + } finally { + if (context != null) { + context.close(); + } + } + } + + /** + * Reads all the messages of the queue. + * + * @param manager Queue manager configuration + * @param queueName Queue that the application uses to put and get messages to and from + */ + public static void readMessages(QueueManager manager, String queueName) { + JMSContext context = null; + try { + // Create a connection factory + JmsFactoryFactory ff = JmsFactoryFactory.getInstance(WMQConstants.JAKARTA_WMQ_PROVIDER); + JmsConnectionFactory cf = ff.createConnectionFactory(); + + // Set the properties + cf.setStringProperty(WMQConstants.WMQ_HOST_NAME, manager.getHost()); + cf.setIntProperty(WMQConstants.WMQ_PORT, manager.getPort()); + cf.setStringProperty(WMQConstants.WMQ_CHANNEL, manager.getChannelName()); + cf.setIntProperty(WMQConstants.WMQ_CONNECTION_MODE, WMQConstants.WMQ_CM_CLIENT); + cf.setStringProperty(WMQConstants.WMQ_QUEUE_MANAGER, manager.getName()); + cf.setStringProperty(WMQConstants.WMQ_APPLICATIONNAME, "Message Receiver"); + cf.setBooleanProperty(WMQConstants.USER_AUTHENTICATION_MQCSP, true); + cf.setStringProperty(WMQConstants.USERID, manager.getUsername()); + cf.setStringProperty(WMQConstants.PASSWORD, manager.getPassword()); + // cf.setStringProperty(WMQConstants.WMQ_SSL_CIPHER_SUITE, "*TLS12ORHIGHER"); + // cf.setIntProperty(MQConstants.CERTIFICATE_VALIDATION_POLICY, + // MQConstants.MQ_CERT_VAL_POLICY_NONE); + + // Create Jakarta objects + context = cf.createContext(); + Destination destination = context.createQueue("queue:///" + queueName); + + JMSConsumer consumer = context.createConsumer(destination); // autoclosable + while (consumer.receiveBody(String.class, 100) != null) {} + + } catch (JMSException e) { + throw new RuntimeException(e); + } 
catch (JMSRuntimeException e) { + if (e.getCause() instanceof MQException) { + MQException mqe = (MQException) e.getCause(); + if (mqe.getReason() == CMQC.MQRC_NO_MSG_AVAILABLE) { // out of messages, we read them all. + return; + } + } + throw new RuntimeException(e); + } finally { + if (context != null) { + context.close(); + } + } + } + + public static void tryLoginWithBadPassword(QueueManager manager) { + + JMSContext context = null; + try { + // Create a connection factory + JmsFactoryFactory ff = JmsFactoryFactory.getInstance(WMQConstants.JAKARTA_WMQ_PROVIDER); + JmsConnectionFactory cf = ff.createConnectionFactory(); + + // Set the properties + cf.setStringProperty(WMQConstants.WMQ_HOST_NAME, manager.getHost()); + cf.setIntProperty(WMQConstants.WMQ_PORT, manager.getPort()); + cf.setStringProperty(WMQConstants.WMQ_CHANNEL, manager.getChannelName()); + cf.setIntProperty(WMQConstants.WMQ_CONNECTION_MODE, WMQConstants.WMQ_CM_CLIENT); + cf.setStringProperty(WMQConstants.WMQ_QUEUE_MANAGER, manager.getName()); + cf.setStringProperty(WMQConstants.WMQ_APPLICATIONNAME, "Bad Password"); + cf.setBooleanProperty(WMQConstants.USER_AUTHENTICATION_MQCSP, true); + cf.setStringProperty(WMQConstants.USERID, manager.getUsername()); + cf.setStringProperty(WMQConstants.PASSWORD, "badpassword"); + // cf.setStringProperty(WMQConstants.WMQ_SSL_CIPHER_SUITE, "*TLS12ORHIGHER"); + // cf.setIntProperty(MQConstants.CERTIFICATE_VALIDATION_POLICY, + // MQConstants.MQ_CERT_VAL_POLICY_NONE); + + // Create Jakarta objects + context = cf.createContext(); + } catch (JMSException e) { + throw new RuntimeException(e); + } catch (JMSRuntimeException e) { + if (e.getCause() instanceof MQException) { + MQException mqe = (MQException) e.getCause(); + if (mqe.getReason() == 2035) { // bad password + return; + } + } + throw new RuntimeException(e); + } finally { + if (context != null) { + context.close(); + } + } + } +} diff --git 
a/ibm-mq-metrics/src/integrationTest/java/io/opentelemetry/ibm/mq/integration/tests/TestWMQMonitor.java b/ibm-mq-metrics/src/integrationTest/java/io/opentelemetry/ibm/mq/integration/tests/TestWMQMonitor.java new file mode 100644 index 000000000..8221c9353 --- /dev/null +++ b/ibm-mq-metrics/src/integrationTest/java/io/opentelemetry/ibm/mq/integration/tests/TestWMQMonitor.java @@ -0,0 +1,58 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.integration.tests; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.fasterxml.jackson.databind.ObjectMapper; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.ibm.mq.WmqMonitor; +import io.opentelemetry.ibm.mq.config.QueueManager; +import io.opentelemetry.ibm.mq.opentelemetry.ConfigWrapper; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutorService; + +/** + * The TestWMQMonitor class extends the WMQMonitor class and provides a test implementation of the + * WebSphere MQ monitoring functionality. It is intended for internal integration test purposes and + * facilitates custom configuration through a test configuration file and a test metric write + * helper. + */ +class TestWMQMonitor { + + private final ConfigWrapper config; + private final ExecutorService threadPool; + private final Meter meter; + + TestWMQMonitor(ConfigWrapper config, Meter meter, ExecutorService service) { + this.config = config; + this.threadPool = service; + this.meter = meter; + } + + /** + * Executes a test run for monitoring WebSphere MQ queue managers based on the provided + * configuration "testConfigFile". + * + *

The method retrieves "queueManagers" from the yml configuration file and uses a custom + * MetricWriteHelper if provided, initializes a TasksExecutionServiceProvider, and executes the + * WMQMonitorTask + */ + void runTest() { + List> queueManagers = config.getQueueManagers(); + assertThat(queueManagers).isNotNull(); + ObjectMapper mapper = new ObjectMapper(); + + WmqMonitor wmqTask = new WmqMonitor(config, threadPool, meter); + + // we override this helper to pass in our opentelemetry helper instead. + for (Map queueManager : queueManagers) { + QueueManager qManager = mapper.convertValue(queueManager, QueueManager.class); + wmqTask.run(qManager); + } + } +} diff --git a/ibm-mq-metrics/src/integrationTest/java/io/opentelemetry/ibm/mq/integration/tests/WMQMonitorIntegrationTest.java b/ibm-mq-metrics/src/integrationTest/java/io/opentelemetry/ibm/mq/integration/tests/WMQMonitorIntegrationTest.java new file mode 100644 index 000000000..853a54d85 --- /dev/null +++ b/ibm-mq-metrics/src/integrationTest/java/io/opentelemetry/ibm/mq/integration/tests/WMQMonitorIntegrationTest.java @@ -0,0 +1,315 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.integration.tests; + +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.IBM_MQ_QUEUE_MANAGER; +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.MESSAGING_DESTINATION_NAME; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.ibm.mq.MQQueueManager; +import com.ibm.mq.constants.CMQC; +import com.ibm.mq.constants.CMQCFC; +import com.ibm.mq.headers.pcf.PCFException; +import com.ibm.mq.headers.pcf.PCFMessage; +import com.ibm.mq.headers.pcf.PCFMessageAgent; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; 
+import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.ibm.mq.config.QueueManager; +import io.opentelemetry.ibm.mq.opentelemetry.ConfigWrapper; +import io.opentelemetry.ibm.mq.opentelemetry.Main; +import io.opentelemetry.ibm.mq.util.WmqUtil; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.testing.junit5.OpenTelemetryExtension; +import java.io.File; +import java.net.URISyntaxException; +import java.net.URL; +import java.nio.file.Paths; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import org.jetbrains.annotations.NotNull; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.RegisterExtension; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** Integration Test for WMQMonitor */ +@Disabled +class WMQMonitorIntegrationTest { + + private static final Logger logger = LoggerFactory.getLogger(WMQMonitorIntegrationTest.class); + + @RegisterExtension + static final OpenTelemetryExtension otelTesting = OpenTelemetryExtension.create(); + + private static final ExecutorService service = + Executors.newFixedThreadPool( + 4, /* one gets burned with our @BeforeAll message uzi, 4 is faster than 2 */ + r -> { + Thread thread = new Thread(r); + thread.setUncaughtExceptionHandler( + (t, e) -> { + logger.error("Uncaught exception", e); + fail(e.getMessage()); + }); + thread.setDaemon(true); + 
thread.setName("WMQMonitorIntegrationTest"); + return thread; + }); + + private static QueueManager getQueueManagerConfig() throws Exception { + String configFile = getConfigFile("conf/test-config.yml"); + ConfigWrapper wrapper = ConfigWrapper.parse(configFile); + Map queueManagerConfig = wrapper.getQueueManagers().get(0); + ObjectMapper mapper = new ObjectMapper(); + return mapper.convertValue(queueManagerConfig, QueueManager.class); + } + + @NotNull + private static String getConfigFile(String resourcePath) throws URISyntaxException { + URL resource = WMQMonitorIntegrationTest.class.getClassLoader().getResource(resourcePath); + if (resource == null) { + throw new IllegalArgumentException("file not found!"); + } + + File file = Paths.get(resource.toURI()).toFile(); + logger.info("Config file: {}", file.getAbsolutePath()); + return file.getAbsolutePath(); + } + + private static void configureQueueManager(QueueManager manager) { + MQQueueManager ibmQueueManager = WmqUtil.connectToQueueManager(manager); + PCFMessageAgent agent = WmqUtil.initPcfMessageAgent(manager, ibmQueueManager); + PCFMessage request = new PCFMessage(CMQCFC.MQCMD_CHANGE_Q_MGR); + // turn on emitting authority events + request.addParameter(CMQC.MQIA_AUTHORITY_EVENT, CMQCFC.MQEVR_ENABLED); + // turn on emitting configuration events + request.addParameter(CMQC.MQIA_CONFIGURATION_EVENT, CMQCFC.MQEVR_ENABLED); + // turn on emitting channel auto-definition events + request.addParameter(CMQC.MQIA_CHANNEL_AUTO_DEF_EVENT, CMQCFC.MQEVR_ENABLED); + // turn on emitting channel events + request.addParameter(CMQC.MQIA_CHANNEL_EVENT, CMQCFC.MQEVR_ENABLED); + // turn on emitting command events + request.addParameter(CMQC.MQIA_COMMAND_EVENT, CMQCFC.MQEVR_ENABLED); + // turn on emitting inhibit events + request.addParameter(CMQC.MQIA_INHIBIT_EVENT, CMQCFC.MQEVR_ENABLED); + // turn on emitting local events + request.addParameter(CMQC.MQIA_LOCAL_EVENT, CMQCFC.MQEVR_ENABLED); + // turn on emitting performance events + 
request.addParameter(CMQC.MQIA_PERFORMANCE_EVENT, CMQCFC.MQEVR_ENABLED); + // turn on emitting remote events + request.addParameter(CMQC.MQIA_REMOTE_EVENT, CMQCFC.MQEVR_ENABLED); + // turn on emitting SSL events + request.addParameter(CMQC.MQIA_SSL_EVENT, CMQCFC.MQEVR_ENABLED); + // turn on emitting start/stop events + request.addParameter(CMQC.MQIA_START_STOP_EVENT, CMQCFC.MQEVR_ENABLED); + try { + agent.send(request); + } catch (Exception e) { + if (e instanceof PCFException) { + PCFMessage[] msgs = (PCFMessage[]) ((PCFException) e).exceptionSource; + for (PCFMessage msg : msgs) { + logger.error(msg.toString()); + } + } + throw new RuntimeException(e); + } + } + + @BeforeAll + public static void sendClientMessages() throws Exception { + QueueManager qManager = getQueueManagerConfig(); + configureQueueManager(qManager); + + // create a queue and fill it up past its capacity. + JakartaPutGet.createQueue(qManager, "smallqueue", 10); + + JakartaPutGet.runPutGet(qManager, "myqueue", 10, 1); + + Future ignored = + service.submit(() -> JakartaPutGet.runPutGet(qManager, "myqueue", 1000000, 100)); + } + + @AfterAll + public static void stopSendingClientMessages() throws Exception { + QueueManager qManager = getQueueManagerConfig(); + configureQueueManager(qManager); + + service.shutdown(); + } + + @BeforeEach + void setUpEvents() throws Exception { + QueueManager qManager = getQueueManagerConfig(); + // try to login with a bad password: + JakartaPutGet.tryLoginWithBadPassword(qManager); + + JakartaPutGet.sendMessages(qManager, "smallqueue", 1); + Thread.sleep(1000); + JakartaPutGet.sendMessages(qManager, "smallqueue", 8); + Thread.sleep(1000); + JakartaPutGet.sendMessages(qManager, "smallqueue", 5); + } + + @AfterEach + void clearQueue() throws Exception { + // clear the full queue. 
+ JakartaPutGet.readMessages(getQueueManagerConfig(), "smallqueue"); + } + + @Test + void test_monitor_with_full_config() throws Exception { + String configFile = getConfigFile("conf/test-config.yml"); + + ConfigWrapper config = ConfigWrapper.parse(configFile); + Meter meter = otelTesting.getOpenTelemetry().getMeter("opentelemetry.io/mq"); + TestWMQMonitor monitor = new TestWMQMonitor(config, meter, service); + monitor.runTest(); + + List data = otelTesting.getMetrics(); + Map metrics = new HashMap<>(); + for (MetricData metricData : data) { + metrics.put(metricData.getName(), metricData); + } + Set metricNames = metrics.keySet(); + // this value is read from the active channels count: + assertThat(metricNames).contains("ibm.mq.manager.active.channels"); + // this value is read from the configuration queue. + assertThat(metricNames).contains("ibm.mq.manager.max.handles"); + // this value is read from the queue manager events, for unauthorized events. + assertThat(metricNames).contains("ibm.mq.unauthorized.event"); + // this value is read from the performance event queue. + assertThat(metricNames).contains("ibm.mq.queue.depth.full.event"); + // this value is read from the performance event queue. 
+ assertThat(metricNames).contains("ibm.mq.queue.depth.high.event"); + assertThat(metricNames).contains("ibm.mq.queue.depth.low.event"); + // reads a value from the heartbeat gauge + assertThat(metricNames).contains("ibm.mq.heartbeat"); + assertThat(metricNames).contains("ibm.mq.oldest.msg.age"); + if (metrics.get("ibm.mq.oldest.msg.age") != null) { + Set queueNames = + metrics.get("ibm.mq.oldest.msg.age").getLongGaugeData().getPoints().stream() + .map(pt -> pt.getAttributes().get(MESSAGING_DESTINATION_NAME)) + .collect(Collectors.toSet()); + assertThat(queueNames).contains("smallqueue"); + } + // make sure we get MQ manager status + assertThat(metricNames).contains("ibm.mq.manager.status"); + if (metrics.get("ibm.mq.manager.status") != null) { + Set queueManagers = + metrics.get("ibm.mq.manager.status").getLongGaugeData().getPoints().stream() + .map(pt -> pt.getAttributes().get(IBM_MQ_QUEUE_MANAGER)) + .collect(Collectors.toSet()); + assertThat(queueManagers).contains("QM1"); + } + + assertThat(metricNames).contains("ibm.mq.onqtime.2"); + if (metrics.get("ibm.mq.onqtime.2") != null) { + Set queueNames = + metrics.get("ibm.mq.onqtime.2").getLongGaugeData().getPoints().stream() + .map(pt -> pt.getAttributes().get(MESSAGING_DESTINATION_NAME)) + .collect(Collectors.toSet()); + assertThat(queueNames).contains("smallqueue"); + Set queueManagers = + metrics.get("ibm.mq.manager.status").getLongGaugeData().getPoints().stream() + .map(pt -> pt.getAttributes().get(IBM_MQ_QUEUE_MANAGER)) + .collect(Collectors.toSet()); + assertThat(queueManagers).contains("QM1"); + // TODO: Add more asserts about data values, units, attributes, etc, not just names + } + } + + @Test + void test_wmqmonitor() throws Exception { + String configFile = getConfigFile("conf/test-queuemgr-config.yml"); + ConfigWrapper config = ConfigWrapper.parse(configFile); + Meter meter = otelTesting.getOpenTelemetry().getMeter("opentelemetry.io/mq"); + + TestWMQMonitor monitor = new TestWMQMonitor(config, meter, 
service); + monitor.runTest(); + // TODO: Wait why are there no asserts here? + } + + @Test + void test_otlphttp() throws Exception { + ConfigWrapper config = + ConfigWrapper.parse(WMQMonitorIntegrationTest.getConfigFile("conf/test-config.yml")); + ScheduledExecutorService service = + Executors.newScheduledThreadPool(config.getNumberOfThreads()); + Main.run(config, service, otelTesting.getOpenTelemetry()); + CountDownLatch latch = new CountDownLatch(1); + Future ignored = service.submit(latch::countDown); + Thread.sleep(5000); // TODO: This is fragile and time consuming and should be made better + service.shutdown(); + assertTrue(service.awaitTermination(30, TimeUnit.SECONDS)); + + List data = otelTesting.getMetrics(); + Set metricNames = new HashSet<>(); + for (MetricData metricData : data) { + metricNames.add(metricData.getName()); + } + // this value is read from the active channels count: + assertThat(metricNames).contains("ibm.mq.manager.active.channels"); + // this value is read from the configuration queue. + assertThat(metricNames).contains("ibm.mq.manager.max.handles"); + // this value is read from the queue manager events, for unauthorized events. + assertThat(metricNames).contains("ibm.mq.unauthorized.event"); + // this value is read from the performance event queue. + assertThat(metricNames).contains("ibm.mq.queue.depth.full.event"); + // this value is read from the performance event queue. 
+ assertThat(metricNames).contains("ibm.mq.queue.depth.high.event"); + assertThat(metricNames).contains("ibm.mq.queue.depth.low.event"); + // reads a value from the heartbeat gauge + assertThat(metricNames).contains("ibm.mq.heartbeat"); + } + + @Test + void test_bad_connection() throws Exception { + logger.info("\n\n\n\n\n\nRunning test: test_bad_connection"); + String configFile = getConfigFile("conf/test-bad-config.yml"); + + ConfigWrapper config = ConfigWrapper.parse(configFile); + Meter meter = otelTesting.getOpenTelemetry().getMeter("opentelemetry.io/mq"); + TestWMQMonitor monitor = new TestWMQMonitor(config, meter, service); + monitor.runTest(); + + List data = otelTesting.getMetrics(); + + assertThat(data).isNotEmpty(); + assertThat(data).hasSize(2); + + Attributes attrs = null; + for (MetricData metricData : data) { + if ("ibm.mq.connection.errors".equals(metricData.getName())) { + attrs = metricData.getData().getPoints().stream().iterator().next().getAttributes(); + } + } + + assertThat(attrs).isNotNull(); + + String value = attrs.get(AttributeKey.stringKey("error.code")); + + assertThat(value).isEqualTo("2538"); + } +} diff --git a/ibm-mq-metrics/src/integrationTest/resources/conf/test-bad-config.yml b/ibm-mq-metrics/src/integrationTest/resources/conf/test-bad-config.yml new file mode 100644 index 000000000..e839e54ed --- /dev/null +++ b/ibm-mq-metrics/src/integrationTest/resources/conf/test-bad-config.yml @@ -0,0 +1,302 @@ +queueManagers: + - name: "QM1" + host: "localhost" + port: 1417 + + #The transport type for the queue manager connection, the default is "Bindings" for a binding type connection + #For bindings type, connection WMQ extension (i.e machine agent) need to be on the same machine on which WebbsphereMQ server is running + #For client type, connection change it to "Client". + transportType: "Client" + + #Channel name of the queue manager, channel should be server-conn type. 
+ #This field is not required in case of transportType: Bindings + channelName: DEV.ADMIN.SVRCONN + + #for user access level, please check "Access Permissions" section on the extensions page + #comment out the username and password in case of transportType: Bindings. + username: "admin" + password: "passw0rd" + + queueFilters: + #Can provide complete queue name or generic names. A generic name is a character string followed by an asterisk (*), + #for example ABC*, and it selects all objects having names that start with the selected character string. + #An asterisk on its own matches all possible names. + include: ["*"] + exclude: + #type value: STARTSWITH, EQUALS, ENDSWITH, CONTAINS + - type: "STARTSWITH" + #The name of the queue or queue name pattern as per queue filter, comma separated values + values: ["SYSTEM","AMQ"] + + + channelFilters: + #Can provide complete channel name or generic names. A generic name is a character string followed by an asterisk (*), + #for example ABC*, and it selects all objects having names that start with the selected character string. + #An asterisk on its own matches all possible names. + include: ["*"] + exclude: + #type value: STARTSWITH, EQUALS, ENDSWITH, CONTAINS + - type: "STARTSWITH" + #The name of the queue or queue name pattern as per queue filter, comma separated values + values: ["SYSTEM"] + + listenerFilters: + #Can provide complete channel name or generic names. A generic name is a character string followed by an asterisk (*), + #for example ABC*, and it selects all objects having names that start with the selected character string. + #An asterisk on its own matches all possible names. 
+ include: ["*"] + exclude: + #type value: STARTSWITH, EQUALS, ENDSWITH, CONTAINS + - type: "STARTSWITH" + #The name of the queue or queue name pattern as per queue filter, comma separated values + values: ["SYSTEM"] + + topicFilters: + # For topics, IBM MQ uses the topic wildcard characters ('#' and '+') and does not treat a trailing asterisk as a wildcard + # https://www.ibm.com/support/knowledgecenter/en/SSFKSJ_7.5.0/com.ibm.mq.pla.doc/q005020_.htm + include: ["#"] + exclude: + #type value: STARTSWITH, EQUALS, ENDSWITH, CONTAINS + - type: "STARTSWITH" + #The name of the queue or queue name pattern as per queue filter, comma separated values + values: ["SYSTEM","$SYS"] + +mqMetrics: + # This Object will extract queue manager metrics + - metricsType: "queueMgrMetrics" + metrics: + include: + - Status: + alias: "Status" + ibmConstant: "com.ibm.mq.constants.CMQCFC.MQIACF_Q_MGR_STATUS" + aggregationType: "OBSERVATION" + timeRollUpType: "AVERAGE" + clusterRollUpType: "INDIVIDUAL" + - ConnectionCount: + alias: "ConnectionCount" + ibmConstant: "com.ibm.mq.constants.CMQCFC.MQIACF_CONNECTION_COUNT" + + - ReusableLogSize: + alias: "Reusable Log Size" + ibmConstant: "com.ibm.mq.constants.CMQCFC.MQIACF_REUSABLE_LOG_SIZE" + ibmCommand: "MQCMD_INQUIRE_Q_MGR_STATUS" + + - RestartLogSize: + alias: "Restart Log Size" + ibmConstant: "com.ibm.mq.constants.CMQCFC.MQIACF_RESTART_LOG_SIZE" + ibmCommand: "MQCMD_INQUIRE_Q_MGR_STATUS" + + - ArchiveLogSize: + alias: "Archive Log Size" + ibmConstant: "com.ibm.mq.constants.CMQCFC.MQIACF_ARCHIVE_LOG_SIZE" + ibmCommand: "MQCMD_INQUIRE_Q_MGR_STATUS" + + - StatisticsInterval: + alias: "Statistics Interval" + ibmConstant: "com.ibm.mq.constants.CMQC.MQIA_STATISTICS_INTERVAL" + ibmCommand: "MQCMD_INQUIRE_Q_MGR" + + # This Object will extract queue metrics + - metricsType: "queueMetrics" + metrics: + include: + - MaxQueueDepth: + alias: "Max Queue Depth" + ibmConstant: "com.ibm.mq.constants.CMQC.MQIA_MAX_Q_DEPTH" + ibmCommand: "MQCMD_INQUIRE_Q" + + 
- CurrentQueueDepth: + alias: "Current Queue Depth" + ibmConstant: "com.ibm.mq.constants.CMQC.MQIA_CURRENT_Q_DEPTH" + ibmCommand: "MQCMD_INQUIRE_Q" + + - CurrentQueueFileSize: + alias: "Current queue file size" + ibmConstant: "com.ibm.mq.constants.CMQCFC.MQIACF_CUR_Q_FILE_SIZE" + ibmCommand: "MQCMD_INQUIRE_Q_STATUS" + + - MaxQueueFileSize: + alias: "Current maximum queue file size" + ibmConstant: "com.ibm.mq.constants.CMQCFC.MQIACF_CUR_MAX_FILE_SIZE" + ibmCommand: "MQCMD_INQUIRE_Q_STATUS" + + - OpenInputCount: + alias: "Open Input Count" + ibmConstant: "com.ibm.mq.constants.CMQC.MQIA_OPEN_INPUT_COUNT" + ibmCommand: "MQCMD_INQUIRE_Q" + + - OpenOutputCount: + alias: "Open Output Count" + ibmConstant: "com.ibm.mq.constants.CMQC.MQIA_OPEN_OUTPUT_COUNT" + ibmCommand: "MQCMD_INQUIRE_Q" + + - OldestMsgAge: + alias: "OldestMsgAge" + ibmConstant: "com.ibm.mq.constants.CMQCFC.MQIACF_OLDEST_MSG_AGE" + ibmCommand: "MQCMD_INQUIRE_Q_STATUS" + aggregationType: "OBSERVATION" + timeRollUpType: "CURRENT" + clusterRollUpType: "INDIVIDUAL" + + - UncommittedMsgs: + alias: "UncommittedMsgs" + ibmConstant: "com.ibm.mq.constants.CMQCFC.MQIACF_UNCOMMITTED_MSGS" + ibmCommand: "MQCMD_INQUIRE_Q_STATUS" + + - OnQTime: + ibmConstant: "com.ibm.mq.constants.CMQCFC.MQIACF_Q_TIME_INDICATOR" + ibmCommand: "MQCMD_INQUIRE_Q_STATUS" + aggregationType: "OBSERVATION" + timeRollUpType: "CURRENT" + clusterRollUpType: "INDIVIDUAL" + + - HighQDepth: + alias: "HighQDepth" + ibmConstant: "com.ibm.mq.constants.CMQC.MQIA_HIGH_Q_DEPTH" + ibmCommand: "MQCMD_RESET_Q_STATS" + + - MsgDeqCount: + alias: "MsgDeqCount" + ibmConstant: "com.ibm.mq.constants.CMQC.MQIA_MSG_DEQ_COUNT" + ibmCommand: "MQCMD_RESET_Q_STATS" + + - MsgEnqCount: + alias: "MsgEnqCount" + ibmConstant: "com.ibm.mq.constants.CMQC.MQIA_MSG_ENQ_COUNT" + ibmCommand: "MQCMD_RESET_Q_STATS" + + - UncommittedMsgs: + alias: "Uncommitted Messages" + ibmConstant: "com.ibm.mq.constants.CMQCFC.MQIACF_UNCOMMITTED_MSGS" + ibmCommand: "MQCMD_INQUIRE_Q_STATUS" + + - 
ServiceInterval: + alias: "Service Interval" + ibmConstant: "com.ibm.mq.constants.CMQC.MQIA_Q_SERVICE_INTERVAL" + ibmCommand: "MQCMD_INQUIRE_Q" + + - ServiceIntervalEvent: + alias: "Service Interval Event" + ibmConstant: "com.ibm.mq.constants.CMQC.MQIA_Q_SERVICE_INTERVAL_EVENT" + ibmCommand: "MQCMD_INQUIRE_Q" + + # This Object will extract channel metrics + - metricsType: "channelMetrics" + metrics: + include: + - Messages: + alias: "Messages" + ibmConstant: "com.ibm.mq.constants.CMQCFC.MQIACH_MSGS" + ibmCommand: "MQCMD_INQUIRE_CHANNEL_STATUS" + + - Status: + alias: "Status" + ibmConstant: "com.ibm.mq.constants.CMQCFC.MQIACH_CHANNEL_STATUS" #http://www.ibm.com/support/knowledgecenter/SSFKSJ_7.5.0/com.ibm.mq.ref.dev.doc/q090880_.htm + aggregationType: "OBSERVATION" + timeRollUpType: "AVERAGE" + clusterRollUpType: "INDIVIDUAL" + ibmCommand: "MQCMD_INQUIRE_CHANNEL_STATUS" + + - ByteSent: + alias: "Byte Sent" + ibmConstant: "com.ibm.mq.constants.CMQCFC.MQIACH_BYTES_SENT" + ibmCommand: "MQCMD_INQUIRE_CHANNEL_STATUS" + + - ByteReceived: + alias: "Byte Received" + ibmConstant: "com.ibm.mq.constants.CMQCFC.MQIACH_BYTES_RECEIVED" + ibmCommand: "MQCMD_INQUIRE_CHANNEL_STATUS" + + - BuffersSent: + alias: "Buffers Sent" + ibmConstant: "com.ibm.mq.constants.CMQCFC.MQIACH_BUFFERS_SENT" + ibmCommand: "MQCMD_INQUIRE_CHANNEL_STATUS" + + - BuffersReceived: + alias: "Buffers Received" + ibmConstant: "com.ibm.mq.constants.CMQCFC.MQIACH_BUFFERS_RECEIVED" + ibmCommand: "MQCMD_INQUIRE_CHANNEL_STATUS" + + - CurrentSharingConversations: + alias: "Current Sharing Conversations" + ibmConstant: "com.ibm.mq.constants.CMQCFC.MQIACH_CURRENT_SHARING_CONVS" + + - MaxSharingConversations: + alias: "Max Sharing Conversations" + ibmConstant: "com.ibm.mq.constants.CMQCFC.MQIACH_MAX_SHARING_CONVS" + + - MaxInstances: + alias: "Max Instances" + ibmConstant: "com.ibm.mq.constants.CMQCFC.MQIACH_MAX_INSTANCES" + ibmCommand: "MQCMD_INQUIRE_CHANNEL" + + - MaxInstancesPerClient: + alias: "Max Instances per 
Client" + ibmConstant: "com.ibm.mq.constants.CMQCFC.MQIACH_MAX_INSTS_PER_CLIENT" + ibmCommand: "MQCMD_INQUIRE_CHANNEL" + + - MsgRetryCount: + alias: "Message Retry Count" + ibmConstant: "com.ibm.mq.constants.CMQCFC.MQIACH_MR_COUNT" + ibmCommand: "MQCMD_INQUIRE_CHANNEL" + + - MsgsReceived: + alias: "Message Received Count" + ibmConstant: "com.ibm.mq.constants.CMQCFC.MQIACH_MSGS_RECEIVED" + ibmCommand: "MQCMD_INQUIRE_CHANNEL" + + - MsgsSent: + alias: "Message Sent" + ibmConstant: "com.ibm.mq.constants.CMQCFC.MQIACH_MSGS_SENT" + ibmCommand: "MQCMD_INQUIRE_CHANNEL" + + - metricsType: "listenerMetrics" + metrics: + include: + - Status: + alias: "Status" + ibmConstant: "com.ibm.mq.constants.CMQCFC.MQIACH_LISTENER_STATUS" + aggregationType: "OBSERVATION" + timeRollUpType: "AVERAGE" + clusterRollUpType: "INDIVIDUAL" + + # This Object will extract topic metrics + - metricsType: "topicMetrics" + metrics: + include: + - PublishCount: + alias: "Publish Count" + ibmConstant: "com.ibm.mq.constants.CMQC.MQIA_PUB_COUNT" + ibmCommand: "MQCMD_INQUIRE_TOPIC_STATUS" + - SubscriptionCount: + alias: "Subscription Count" + ibmConstant: "com.ibm.mq.constants.CMQC.MQIA_SUB_COUNT" + ibmCommand: "MQCMD_INQUIRE_TOPIC_STATUS" + + # Sets up reading configuration events from the configuration queue. + - metricsType: "configurationMetrics" + metrics: + include: + - MaxHandles: + alias: "Max Handles" + ibmConstant: "com.ibm.mq.constants.CMQC.MQIA_MAX_HANDLES" + +#Run it as a scheduled task instead of running every minute. 
#The transport type for the queue manager connection; the default is "Bindings" for a bindings type connection. + #For a bindings type connection, the WMQ extension (i.e. the machine agent) needs to be on the same machine on which the WebSphere MQ server is running. + #For a client type connection, change it to "Client".
The PCF responses to these requests are sent to the default reply-to queue called + #SYSTEM.DEFAULT.MODEL.QUEUE. However, you can override this behavior and send it to a temporary dynamic queue by changing the modelQueueName and replyQueuePrefix fields. + #For more details around this https://www.ibm.com/support/knowledgecenter/SSFKSJ_7.5.0/com.ibm.mq.ref.adm.doc/q083240_.htm & https://www.ibm.com/support/knowledgecenter/SSFKSJ_8.0.0/com.ibm.mq.adm.doc/q020010_.htm + #modelQueueName: "" + #replyQueuePrefix: "" + + + #Sets the CCSID used in the message descriptor of request and response messages. The default value is MQC.MQCCSI_Q_MGR. + #To set this, please use the integer value. + #ccsid: + + #Sets the encoding used in the message descriptor of request and response messages. The default value is MQC.MQENC_NATIVE. + #To set this, please use the integer value. + #encoding: + + # IBM Cipher Suite e.g. "SSL_RSA_WITH_AES_128_CBC_SHA256".. + # For translation to IBM Cipher http://www.ibm.com/support/knowledgecenter/SSFKSJ_8.0.0/com.ibm.mq.dev.doc/q113210_.htm + # A cipher working for IBM Cloud MQ and Temurin JDK 8 is TLS_AES_128_GCM_SHA256 + #cipherSuite: "TLS_AES_128_GCM_SHA256" + + + queueFilters: + #Can provide complete queue name or generic names. A generic name is a character string followed by an asterisk (*), + #for example ABC*, and it selects all objects having names that start with the selected character string. + #An asterisk on its own matches all possible names. + include: ["*"] + exclude: + #type value: STARTSWITH, EQUALS, ENDSWITH, CONTAINS + - type: "STARTSWITH" + #The name of the queue or queue name pattern as per queue filter, comma separated values + values: ["SYSTEM","AMQ"] + + + channelFilters: + #Can provide complete channel name or generic names. A generic name is a character string followed by an asterisk (*), + #for example ABC*, and it selects all objects having names that start with the selected character string. 
"ibm.mq.current.sharing.conversations": # Current number of conversations on this channel instance.
+ enabled: true + "ibm.mq.byte.received": # Number of bytes received + enabled: true + "ibm.mq.byte.sent": # Number of bytes sent + enabled: true + "ibm.mq.buffers.received": # Buffers received + enabled: true + "ibm.mq.buffers.sent": # Buffers sent + enabled: true + "ibm.mq.message.count": # Message count + enabled: true + "ibm.mq.open.input.count": # Count of applications sending messages to the queue + enabled: true + "ibm.mq.open.output.count": # Count of applications consuming messages from the queue + enabled: true + "ibm.mq.high.queue.depth": # The current high queue depth + enabled: true + "ibm.mq.service.interval": # The queue service interval + enabled: true + "ibm.mq.queue.depth.full.event": # The number of full queue events + enabled: true + "ibm.mq.queue.depth.high.event": # The number of high queue events + enabled: true + "ibm.mq.queue.depth.low.event": # The number of low queue events + enabled: true + "ibm.mq.uncommitted.messages": # Number of uncommitted messages + enabled: true + "ibm.mq.oldest.msg.age": # Queue message oldest age + enabled: true + "ibm.mq.current.max.queue.filesize": # Current maximum queue file size + enabled: true + "ibm.mq.current.queue.filesize": # Current queue file size + enabled: true + "ibm.mq.instances.per.client": # Instances per client + enabled: true + "ibm.mq.message.deq.count": # Message dequeue count + enabled: true + "ibm.mq.message.enq.count": # Message enqueue count + enabled: true + "ibm.mq.queue.depth": # Current queue depth + enabled: true + "ibm.mq.service.interval.event": # Queue service interval event + enabled: true + "ibm.mq.reusable.log.size": # The amount of space occupied, in megabytes, by log extents available to be reused. + enabled: true + "ibm.mq.manager.active.channels": # The queue manager active maximum channels limit + enabled: true + "ibm.mq.restart.log.size": # Size of the log data required for restart recovery in megabytes. 
+ enabled: true + "ibm.mq.max.queue.depth": # Maximum queue depth + enabled: true + "ibm.mq.onqtime.short_period": # Amount of time, in microseconds, that a message spent on the queue, over a short period + enabled: true + "ibm.mq.onqtime.long_period": # Amount of time, in microseconds, that a message spent on the queue, over a longer period + enabled: true + "ibm.mq.message.received.count": # Number of messages received + enabled: true + "ibm.mq.message.sent.count": # Number of messages sent + enabled: true + "ibm.mq.max.instances": # Max channel instances + enabled: true + "ibm.mq.connection.count": # Active connections count + enabled: true + "ibm.mq.manager.status": # Queue manager status + enabled: true + "ibm.mq.heartbeat": # Queue manager heartbeat + enabled: true + "ibm.mq.archive.log.size": # Queue manager archive log size + enabled: true + "ibm.mq.manager.max.active.channels": # Queue manager max active channels + enabled: true + "ibm.mq.manager.statistics.interval": # Queue manager statistics interval + enabled: true + "ibm.mq.publish.count": # Topic publication count + enabled: true + "ibm.mq.subscription.count": # Topic subscription count + enabled: true + "ibm.mq.listener.status": # Listener status + enabled: true + "ibm.mq.unauthorized.event": # Number of authentication error events + enabled: true + "ibm.mq.manager.max.handles": # Max open handles + enabled: true + +sslConnection: + trustStorePath: "" + trustStorePassword: "" + + keyStorePath: "" + keyStorePassword: "" + +# Configure the OTLP exporter using system properties keys following the specification https://opentelemetry.io/docs/languages/java/configuration/ +otlpExporter: + otel.exporter.otlp.endpoint: http://0.0.0.0:4318 + otel.exporter.otlp.protocol: http/protobuf + otel.metric.export.interval: 5s + otel.logs.exporter: none + otel.traces.exporter: none diff --git a/ibm-mq-metrics/src/integrationTest/resources/conf/test-queuemgr-config.yml 
#The transport type for the queue manager connection; the default is "Bindings" for a bindings type connection. + #For a bindings type connection, the WMQ extension (i.e. the machine agent) needs to be on the same machine on which the WebSphere MQ server is running. + #For a client type connection, change it to "Client".
"ibm.mq.current.sharing.conversations": # Current number of conversations on this channel instance.
+ enabled: true + "ibm.mq.byte.received": # Number of bytes received + enabled: true + "ibm.mq.byte.sent": # Number of bytes sent + enabled: true + "ibm.mq.buffers.received": # Buffers received + enabled: true + "ibm.mq.buffers.sent": # Buffers sent + enabled: true + "ibm.mq.message.count": # Message count + enabled: true + "ibm.mq.open.input.count": # Count of applications sending messages to the queue + enabled: true + "ibm.mq.open.output.count": # Count of applications consuming messages from the queue + enabled: true + "ibm.mq.high.queue.depth": # The current high queue depth + enabled: true + "ibm.mq.service.interval": # The queue service interval + enabled: true + "ibm.mq.queue.depth.full.event": # The number of full queue events + enabled: true + "ibm.mq.queue.depth.high.event": # The number of high queue events + enabled: true + "ibm.mq.queue.depth.low.event": # The number of low queue events + enabled: true + "ibm.mq.uncommitted.messages": # Number of uncommitted messages + enabled: true + "ibm.mq.oldest.msg.age": # Queue message oldest age + enabled: true + "ibm.mq.current.max.queue.filesize": # Current maximum queue file size + enabled: true + "ibm.mq.current.queue.filesize": # Current queue file size + enabled: true + "ibm.mq.instances.per.client": # Instances per client + enabled: true + "ibm.mq.message.deq.count": # Message dequeue count + enabled: true + "ibm.mq.message.enq.count": # Message enqueue count + enabled: true + "ibm.mq.queue.depth": # Current queue depth + enabled: true + "ibm.mq.service.interval.event": # Queue service interval event + enabled: true + "ibm.mq.reusable.log.size": # The amount of space occupied, in megabytes, by log extents available to be reused. + enabled: true + "ibm.mq.manager.active.channels": # The queue manager active maximum channels limit + enabled: true + "ibm.mq.restart.log.size": # Size of the log data required for restart recovery in megabytes. 
+ enabled: true + "ibm.mq.max.queue.depth": # Maximum queue depth + enabled: true + "ibm.mq.onqtime.short_period": # Amount of time, in microseconds, that a message spent on the queue, over a short period + enabled: true + "ibm.mq.onqtime.long_period": # Amount of time, in microseconds, that a message spent on the queue, over a longer period + enabled: true + "ibm.mq.message.received.count": # Number of messages received + enabled: true + "ibm.mq.message.sent.count": # Number of messages sent + enabled: true + "ibm.mq.max.instances": # Max channel instances + enabled: true + "ibm.mq.connection.count": # Active connections count + enabled: true + "ibm.mq.manager.status": # Queue manager status + enabled: true + "ibm.mq.heartbeat": # Queue manager heartbeat + enabled: true + "ibm.mq.archive.log.size": # Queue manager archive log size + enabled: true + "ibm.mq.manager.max.active.channels": # Queue manager max active channels + enabled: true + "ibm.mq.manager.statistics.interval": # Queue manager statistics interval + enabled: true + "ibm.mq.publish.count": # Topic publication count + enabled: true + "ibm.mq.subscription.count": # Topic subscription count + enabled: true + "ibm.mq.listener.status": # Listener status + enabled: true + "ibm.mq.unauthorized.event": # Number of authentication error events + enabled: true + "ibm.mq.manager.max.handles": # Max open handles + enabled: true + + +sslConnection: + trustStorePath: "" + trustStorePassword: "" + trustStoreEncryptedPassword: "" + + keyStorePath: "" + keyStorePassword: "" + keyStoreEncryptedPassword: "" + +# Configure the OTLP exporter using system properties keys following the specification https://opentelemetry.io/docs/languages/java/configuration/ +otlpExporter: + otel.metric.export.interval: 5s + otel.logs.exporter: none + otel.traces.exporter: none diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/WmqContext.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/WmqContext.java new file mode 
100644 index 000000000..b3fafadb6 --- /dev/null +++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/WmqContext.java @@ -0,0 +1,106 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq; + +import com.ibm.mq.constants.CMQC; +import io.opentelemetry.ibm.mq.config.QueueManager; +import java.util.Hashtable; +import javax.annotation.Nullable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Takes care of websphere mq connection, authentication, SSL, Cipher spec, certificate based + * authorization.
+ * It also validates the arguments passed for various scenarios. + */ +public final class WmqContext { + private static final String TRANSPORT_TYPE_CLIENT = "Client"; + private static final String TRANSPORT_TYPE_BINDINGS = "Bindings"; + + public static final Logger logger = LoggerFactory.getLogger(WmqContext.class); + private final QueueManager queueManager; + + public WmqContext(QueueManager queueManager) { + this.queueManager = queueManager; + validateArgs(); + } + + /** Note: This Hashtable type is needed for IBM client classes. */ + @SuppressWarnings("JdkObsolete") + public Hashtable getMqEnvironment() { + Hashtable env = new Hashtable<>(); + addEnvProperty(env, CMQC.HOST_NAME_PROPERTY, queueManager.getHost()); + addEnvProperty(env, CMQC.PORT_PROPERTY, queueManager.getPort()); + addEnvProperty(env, CMQC.CHANNEL_PROPERTY, queueManager.getChannelName()); + addEnvProperty(env, CMQC.USER_ID_PROPERTY, queueManager.getUsername()); + addEnvProperty(env, CMQC.PASSWORD_PROPERTY, queueManager.getPassword()); + addEnvProperty(env, CMQC.SSL_CERT_STORE_PROPERTY, queueManager.getSslKeyRepository()); + addEnvProperty(env, CMQC.SSL_CIPHER_SUITE_PROPERTY, queueManager.getCipherSuite()); + // TODO: investigate on CIPHER_SPEC property No Available in MQ 7.5 Jar + + if (TRANSPORT_TYPE_CLIENT.equalsIgnoreCase(queueManager.getTransportType())) { + addEnvProperty(env, CMQC.TRANSPORT_PROPERTY, CMQC.TRANSPORT_MQSERIES_CLIENT); + } else if (TRANSPORT_TYPE_BINDINGS.equalsIgnoreCase(queueManager.getTransportType())) { + addEnvProperty(env, CMQC.TRANSPORT_PROPERTY, CMQC.TRANSPORT_MQSERIES_BINDINGS); + } else { + addEnvProperty(env, CMQC.TRANSPORT_PROPERTY, CMQC.TRANSPORT_MQSERIES); + } + + if (logger.isDebugEnabled()) { + logger.debug("Transport property is {}", env.get(CMQC.TRANSPORT_PROPERTY)); + } + return env; + } + + @SuppressWarnings({"unused", "unchecked", "rawtypes"}) + private static void addEnvProperty(Hashtable env, String propName, @Nullable Object propVal) { + if (null != 
propVal) { + if (propVal instanceof String) { + String propString = (String) propVal; + if (propString.isEmpty()) { + return; + } + } + env.put(propName, propVal); + } + } + + private void validateArgs() { + boolean validArgs = true; + StringBuilder errorMsg = new StringBuilder(); + if (queueManager == null) { + validArgs = false; + errorMsg.append("Queue manager cannot be null"); + } else { + if (TRANSPORT_TYPE_CLIENT.equalsIgnoreCase(queueManager.getTransportType())) { + if (queueManager.getHost() == null || queueManager.getHost().trim().isEmpty()) { + validArgs = false; + errorMsg.append("Host cannot be null or empty for client type connection. "); + } + if (queueManager.getPort() == -1) { + validArgs = false; + errorMsg.append("port should be set for client type connection. "); + } + if (queueManager.getChannelName() == null + || queueManager.getChannelName().trim().isEmpty()) { + validArgs = false; + errorMsg.append("Channel cannot be null or empty for client type connection. "); + } + } + if (TRANSPORT_TYPE_BINDINGS.equalsIgnoreCase(queueManager.getTransportType())) { + if (queueManager.getName() == null || queueManager.getName().trim().isEmpty()) { + validArgs = false; + errorMsg.append("queuemanager cannot be null or empty for bindings type connection. 
"); + } + } + } + + if (!validArgs) { + throw new IllegalArgumentException(errorMsg.toString()); + } + } +} diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/WmqMonitor.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/WmqMonitor.java new file mode 100644 index 000000000..9611604cb --- /dev/null +++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/WmqMonitor.java @@ -0,0 +1,221 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq; + +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.ERROR_CODE; +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.IBM_MQ_QUEUE_MANAGER; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.ibm.mq.MQException; +import com.ibm.mq.MQQueueManager; +import com.ibm.mq.headers.pcf.PCFMessageAgent; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.LongCounter; +import io.opentelemetry.api.metrics.LongGauge; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.ibm.mq.config.QueueManager; +import io.opentelemetry.ibm.mq.metrics.Metrics; +import io.opentelemetry.ibm.mq.metrics.MetricsConfig; +import io.opentelemetry.ibm.mq.metricscollector.ChannelMetricsCollector; +import io.opentelemetry.ibm.mq.metricscollector.InquireChannelCmdCollector; +import io.opentelemetry.ibm.mq.metricscollector.InquireQueueManagerCmdCollector; +import io.opentelemetry.ibm.mq.metricscollector.ListenerMetricsCollector; +import io.opentelemetry.ibm.mq.metricscollector.MetricsCollectorContext; +import io.opentelemetry.ibm.mq.metricscollector.PerformanceEventQueueCollector; +import io.opentelemetry.ibm.mq.metricscollector.QueueManagerEventCollector; +import io.opentelemetry.ibm.mq.metricscollector.QueueManagerMetricsCollector; +import io.opentelemetry.ibm.mq.metricscollector.QueueMetricsCollector; +import io.opentelemetry.ibm.mq.metricscollector.ReadConfigurationEventQueueCollector; 
+import io.opentelemetry.ibm.mq.metricscollector.TopicMetricsCollector;
+import io.opentelemetry.ibm.mq.opentelemetry.ConfigWrapper;
+import io.opentelemetry.ibm.mq.util.WmqUtil;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.function.Consumer;
+import javax.annotation.Nullable;
+import org.jetbrains.annotations.NotNull;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public final class WmqMonitor {
+
+  private static final Logger logger = LoggerFactory.getLogger(WmqMonitor.class);
+
+  private final List<QueueManager> queueManagers;
+  private final List<Consumer<MetricsCollectorContext>> jobs = new ArrayList<>();
+  private final LongCounter errorCodesCounter;
+  private final LongGauge heartbeatGauge;
+  private final ExecutorService threadPool;
+  private final MetricsConfig metricsConfig;
+
+  public WmqMonitor(ConfigWrapper config, ExecutorService threadPool, Meter meter) {
+    List<Map<String, ?>> queueManagers = getQueueManagers(config);
+    ObjectMapper mapper = new ObjectMapper();
+
+    this.queueManagers = new ArrayList<>();
+
+    for (Map<String, ?> queueManager : queueManagers) {
+      try {
+        QueueManager qManager = mapper.convertValue(queueManager, QueueManager.class);
+        this.queueManagers.add(qManager);
+      } catch (Throwable t) {
+        logger.error("Error preparing queue manager {}", queueManager, t);
+      }
+    }
+
+    this.metricsConfig = new MetricsConfig(config);
+
+    this.heartbeatGauge = Metrics.createIbmMqHeartbeat(meter);
+    this.errorCodesCounter = Metrics.createIbmMqConnectionErrors(meter);
+    this.threadPool = threadPool;
+
+    jobs.add(new QueueManagerMetricsCollector(meter));
+    jobs.add(new InquireQueueManagerCmdCollector(meter));
+    jobs.add(new ChannelMetricsCollector(meter));
+    jobs.add(new InquireChannelCmdCollector(meter));
+    jobs.add(new QueueMetricsCollector(meter, threadPool, config));
+    jobs.add(new ListenerMetricsCollector(meter));
+    jobs.add(new TopicMetricsCollector(meter));
+    jobs.add(new ReadConfigurationEventQueueCollector(meter));
+    jobs.add(new PerformanceEventQueueCollector(meter));
+    jobs.add(new QueueManagerEventCollector(meter));
+  }
+
+  public void run() {
+    for (QueueManager qm : this.queueManagers) {
+      run(qm);
+    }
+  }
+
+  public void run(QueueManager queueManager) {
+    String queueManagerName = queueManager.getName();
+    logger.debug("WMQMonitor thread for queueManager {} started.", queueManagerName);
+    long startTime = System.currentTimeMillis();
+    MQQueueManager ibmQueueManager = null;
+    PCFMessageAgent agent = null;
+    int heartBeatMetricValue = 0;
+    try {
+      ibmQueueManager = WmqUtil.connectToQueueManager(queueManager);
+      heartBeatMetricValue = 1;
+      agent = WmqUtil.initPcfMessageAgent(queueManager, ibmQueueManager);
+      extractAndReportMetrics(ibmQueueManager, queueManager, agent);
+    } catch (RuntimeException e) {
+      logger.error(
+          "Error connecting to QueueManager {} by thread {}: {}",
+          queueManagerName,
+          Thread.currentThread().getName(),
+          e.getMessage(),
+          e);
+      if (e.getCause() instanceof MQException) {
+        MQException mqe = (MQException) e.getCause();
+        String errorCode = String.valueOf(mqe.getReason());
+        errorCodesCounter.add(
+            1, Attributes.of(IBM_MQ_QUEUE_MANAGER, queueManagerName, ERROR_CODE, errorCode));
+      }
+    } finally {
+      if (this.metricsConfig.isIbmMqHeartbeatEnabled()) {
+        heartbeatGauge.set(
+            heartBeatMetricValue, Attributes.of(IBM_MQ_QUEUE_MANAGER, queueManagerName));
+      }
+      cleanUp(ibmQueueManager, agent);
+      long endTime = System.currentTimeMillis() - startTime;
+      logger.debug(
+          "WMQMonitor thread for queueManager {} ended. Time taken = {} ms",
+          queueManagerName,
+          endTime);
+    }
+  }
+
+  @NotNull
+  private static List<Map<String, ?>> getQueueManagers(ConfigWrapper config) {
+    List<Map<String, ?>> queueManagers = config.getQueueManagers();
+    if (queueManagers.isEmpty()) {
+      throw new IllegalStateException(
+          "The 'queueManagers' section in config.yml is empty or otherwise incorrect.");
+    }
+    return queueManagers;
+  }
+
+  private void extractAndReportMetrics(
+      MQQueueManager mqQueueManager, QueueManager queueManager, PCFMessageAgent agent) {
+    logger.debug("Queueing {} jobs", jobs.size());
+    MetricsCollectorContext context =
+        new MetricsCollectorContext(queueManager, agent, mqQueueManager, this.metricsConfig);
+    List<Callable<Void>> tasks = new ArrayList<>();
+    for (Consumer<MetricsCollectorContext> collector : jobs) {
+      tasks.add(
+          () -> {
+            try {
+              long startTime = System.currentTimeMillis();
+              collector.accept(context);
+              long diffTime = System.currentTimeMillis() - startTime;
+              if (diffTime > 60000L) {
+                logger.warn(
+                    "{} Task took {} ms to complete",
+                    collector.getClass().getSimpleName(),
+                    diffTime);
+              } else {
+                logger.debug(
+                    "{} Task took {} ms to complete",
+                    collector.getClass().getSimpleName(),
+                    diffTime);
+              }
+            } catch (RuntimeException e) {
+              logger.error(
+                  "Error while running task name = {}", collector.getClass().getSimpleName(), e);
+            }
+            return null;
+          });
+    }
+
+    try {
+      this.threadPool.invokeAll(tasks);
+    } catch (InterruptedException e) {
+      logger.error("Error while the thread {} is waiting ", Thread.currentThread().getName(), e);
+    }
+  }
+
+  /** Destroy the agent and disconnect from queue manager */
+  private static void cleanUp(
+      @Nullable MQQueueManager ibmQueueManager, @Nullable PCFMessageAgent agent) {
+    // Disconnect the agent.
+
+    if (agent != null) {
+      String qMgrName = agent.getQManagerName();
+      try {
+        agent.disconnect();
+        logger.debug(
+            "PCFMessageAgent disconnected for queueManager {} in thread {}",
+            qMgrName,
+            Thread.currentThread().getName());
+      } catch (Exception e) {
+        logger.error(
+            "Error occurred while disconnecting PCFMessageAgent for queueManager {} in thread {}",
+            qMgrName,
+            Thread.currentThread().getName(),
+            e);
+      }
+    }
+
+    // Disconnect queue manager
+    if (ibmQueueManager != null) {
+      String name = "";
+      try {
+        name = ibmQueueManager.getName();
+        ibmQueueManager.disconnect();
+      } catch (Exception e) {
+        logger.error(
+            "Error occurred while disconnecting queueManager {} in thread {}",
+            name,
+            Thread.currentThread().getName(),
+            e);
+      }
+    }
+  }
+}
diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/config/ExcludeFilters.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/config/ExcludeFilters.java
new file mode 100644
index 000000000..862edd30a
--- /dev/null
+++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/config/ExcludeFilters.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright The OpenTelemetry Authors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package io.opentelemetry.ibm.mq.config;
+
+import io.opentelemetry.ibm.mq.metricscollector.FilterType;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+
+/** A jackson databind class used for config. */
+public final class ExcludeFilters {
+
+  private String type = "UNKNOWN";
+  private Set<String> values = new HashSet<>();
+
+  public String getType() {
+    return type;
+  }
+
+  public void setType(String type) {
+    this.type = type;
+  }
+
+  public Set<String> getValues() {
+    return Collections.unmodifiableSet(values);
+  }
+
+  public void setValues(Set<String> values) {
+    this.values = new HashSet<>(values);
+  }
+
+  public static boolean isExcluded(String resourceName, Collection<ExcludeFilters> excludeFilters) {
+    if (excludeFilters == null) {
+      return false;
+    }
+    for (ExcludeFilters filter : excludeFilters) {
+      if (filter.isExcluded(resourceName)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  public boolean isExcluded(String resourceName) {
+    if (resourceName == null || resourceName.isEmpty()) {
+      return true;
+    }
+    switch (FilterType.valueOf(type)) {
+      case CONTAINS:
+        for (String filterValue : values) {
+          if (resourceName.contains(filterValue)) {
+            return true;
+          }
+        }
+        break;
+      case STARTSWITH:
+        for (String filterValue : values) {
+          if (resourceName.startsWith(filterValue)) {
+            return true;
+          }
+        }
+        break;
+      case NONE:
+        return false;
+      case EQUALS:
+        for (String filterValue : values) {
+          if (resourceName.equals(filterValue)) {
+            return true;
+          }
+        }
+        break;
+      case ENDSWITH:
+        for (String filterValue : values) {
+          if (resourceName.endsWith(filterValue)) {
+            return true;
+          }
+        }
+    }
+    return false;
+  }
+}
diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/config/QueueManager.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/config/QueueManager.java
new file mode 100644
index 000000000..11769832f
--- /dev/null
+++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/config/QueueManager.java
@@ -0,0 +1,257 @@
+/*
+ * Copyright The OpenTelemetry Authors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package io.opentelemetry.ibm.mq.config;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import javax.annotation.Nullable;
+
+/** This is a jackson
databind class used purely for config. */
+@JsonIgnoreProperties(ignoreUnknown = true)
+public final class QueueManager {
+
+  @Nullable private String host;
+  private int port = -1;
+  private String name = "UNKNOWN";
+  @Nullable private String channelName;
+  @Nullable private String transportType;
+  @Nullable private String username;
+  @Nullable private String password;
+  @Nullable private String sslKeyRepository;
+  private int ccsid = Integer.MIN_VALUE;
+  private int encoding = Integer.MIN_VALUE;
+  @Nullable private String cipherSuite;
+  @Nullable private String cipherSpec;
+  @Nullable private String replyQueuePrefix;
+  @Nullable private String modelQueueName;
+  private String configurationQueueName = "SYSTEM.ADMIN.CONFIG.EVENT";
+  private String performanceEventsQueueName = "SYSTEM.ADMIN.PERFM.EVENT";
+  private String queueManagerEventsQueueName = "SYSTEM.ADMIN.QMGR.EVENT";
+  private long consumeConfigurationEventInterval;
+  private boolean refreshQueueManagerConfigurationEnabled;
+  // Config default is 100.
+  // https://www.ibm.com/docs/en/ibm-mq/9.3.x?topic=qmini-channels-stanza-file
+  private int maxActiveChannels = 100;
+
+  @Nullable private ResourceFilters queueFilters;
+  @Nullable private ResourceFilters channelFilters;
+  @Nullable private ResourceFilters listenerFilters;
+  @Nullable private ResourceFilters topicFilters;
+
+  @Nullable
+  public String getHost() {
+    return host;
+  }
+
+  public void setHost(String host) {
+    this.host = host;
+  }
+
+  public int getPort() {
+    return port;
+  }
+
+  public void setPort(int port) {
+    this.port = port;
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public void setName(String name) {
+    this.name = name;
+  }
+
+  @Nullable
+  public String getChannelName() {
+    return channelName;
+  }
+
+  public void setChannelName(@Nullable String channelName) {
+    this.channelName = channelName;
+  }
+
+  @Nullable
+  public String getTransportType() {
+    return transportType;
+  }
+
+  public void setTransportType(@Nullable String transportType) {
+    this.transportType = transportType;
+  }
+
+  @Nullable
+  public String getUsername() {
+    return username;
+  }
+
+  public void setUsername(@Nullable String username) {
+    this.username = username;
+  }
+
+  @Nullable
+  public String getPassword() {
+    return password;
+  }
+
+  public void setPassword(@Nullable String password) {
+    this.password = password;
+  }
+
+  public ResourceFilters getQueueFilters() {
+    if (queueFilters == null) {
+      return new ResourceFilters();
+    }
+    return queueFilters;
+  }
+
+  public void setQueueFilters(@Nullable ResourceFilters queueFilters) {
+    this.queueFilters = queueFilters;
+  }
+
+  @Nullable
+  public String getSslKeyRepository() {
+    return sslKeyRepository;
+  }
+
+  public void setSslKeyRepository(@Nullable String sslKeyRepository) {
+    this.sslKeyRepository = sslKeyRepository;
+  }
+
+  @Nullable
+  public String getCipherSuite() {
+    return cipherSuite;
+  }
+
+  public void setCipherSuite(String cipherSuite) {
+    this.cipherSuite = cipherSuite;
+  }
+
+  @Nullable
+  public String getCipherSpec() {
+    return cipherSpec;
+  }
+
+  public void setCipherSpec(@Nullable String cipherSpec) {
+    this.cipherSpec = cipherSpec;
+  }
+
+  public ResourceFilters getChannelFilters() {
+    if (channelFilters == null) {
+      return new ResourceFilters();
+    }
+    return channelFilters;
+  }
+
+  public void setChannelFilters(@Nullable ResourceFilters channelFilters) {
+    this.channelFilters = channelFilters;
+  }
+
+  @Nullable
+  public String getReplyQueuePrefix() {
+    return replyQueuePrefix;
+  }
+
+  public void setReplyQueuePrefix(@Nullable String replyQueuePrefix) {
+    this.replyQueuePrefix = replyQueuePrefix;
+  }
+
+  @Nullable
+  public String getModelQueueName() {
+    return modelQueueName;
+  }
+
+  public void setModelQueueName(@Nullable String modelQueueName) {
+    this.modelQueueName = modelQueueName;
+  }
+
+  public ResourceFilters getListenerFilters() {
+    if (listenerFilters == null) {
+      return new ResourceFilters();
+    }
+    return listenerFilters;
+  }
+
+  public void setListenerFilters(@Nullable ResourceFilters listenerFilters) {
+    this.listenerFilters = listenerFilters;
+  }
+
+  public int getCcsid() {
+    return ccsid;
+  }
+
+  public void setCcsid(int ccsid) {
+    this.ccsid = ccsid;
+  }
+
+  public int getEncoding() {
+    return encoding;
+  }
+
+  public void setEncoding(int encoding) {
+    this.encoding = encoding;
+  }
+
+  public ResourceFilters getTopicFilters() {
+    if (topicFilters == null) {
+      return new ResourceFilters();
+    }
+    return topicFilters;
+  }
+
+  public void setTopicFilters(@Nullable ResourceFilters topicFilters) {
+    this.topicFilters = topicFilters;
+  }
+
+  public String getConfigurationQueueName() {
+    return this.configurationQueueName;
+  }
+
+  public void setConfigurationQueueName(String configurationQueueName) {
+    this.configurationQueueName = configurationQueueName;
+  }
+
+  public long getConsumeConfigurationEventInterval() {
+    return this.consumeConfigurationEventInterval;
+  }
+
+  public void setConsumeConfigurationEventInterval(long consumeConfigurationEventInterval) {
+    this.consumeConfigurationEventInterval = consumeConfigurationEventInterval;
+  }
+
+  public boolean isRefreshQueueManagerConfigurationEnabled() {
+    return refreshQueueManagerConfigurationEnabled;
+  }
+
+  public void setRefreshQueueManagerConfigurationEnabled(
+      boolean refreshQueueManagerConfigurationEnabled) {
+    this.refreshQueueManagerConfigurationEnabled = refreshQueueManagerConfigurationEnabled;
+  }
+
+  public String getPerformanceEventsQueueName() {
+    return performanceEventsQueueName;
+  }
+
+  public void setPerformanceEventsQueueName(String performanceEventsQueueName) {
+    this.performanceEventsQueueName = performanceEventsQueueName;
+  }
+
+  public String getQueueManagerEventsQueueName() {
+    return this.queueManagerEventsQueueName;
+  }
+
+  public void setQueueManagerEventsQueueName(String queueManagerEventsQueueName) {
+    this.queueManagerEventsQueueName = queueManagerEventsQueueName;
+  }
+
+  public int getMaxActiveChannels() {
+    return maxActiveChannels;
+  }
+
+  public void setMaxActiveChannels(int maxActiveChannels) {
+    this.maxActiveChannels = maxActiveChannels;
+  }
+}
diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/config/ResourceFilters.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/config/ResourceFilters.java
new file mode 100644
index 000000000..86a30360e
--- /dev/null
+++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/config/ResourceFilters.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright The OpenTelemetry Authors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package io.opentelemetry.ibm.mq.config;
+
+import java.util.HashSet;
+import java.util.Set;
+
+public final class ResourceFilters {
+
+  private Set<String> include = new HashSet<>();
+  private Set<ExcludeFilters> exclude = new HashSet<>();
+
+  public Set<String> getInclude() {
+    return include;
+  }
+
+  public void setInclude(Set<String> include) {
+    this.include = include;
+  }
+
+  public Set<ExcludeFilters> getExclude() {
+    return exclude;
+  }
+
+  public void setExclude(Set<ExcludeFilters> exclude) {
+    this.exclude = exclude;
+  }
+}
diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metrics/IbmMqAttributes.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metrics/IbmMqAttributes.java
new file mode 100644
index 000000000..fd84b6871
--- /dev/null
+++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metrics/IbmMqAttributes.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright The OpenTelemetry Authors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package io.opentelemetry.ibm.mq.metrics;
+
+import static io.opentelemetry.api.common.AttributeKey.longKey;
+import static io.opentelemetry.api.common.AttributeKey.stringKey;
+
+import io.opentelemetry.api.common.AttributeKey;
+
+// This file is generated using weaver. Do not edit manually.
+
+/** Attribute definitions generated from a Weaver model. Do not edit manually. */
+public final class IbmMqAttributes {
+
+  /** The name of the IBM queue manager */
+  public static final AttributeKey<String> IBM_MQ_QUEUE_MANAGER = stringKey("ibm.mq.queue.manager");
+
+  /** The system-specific name of the messaging operation. */
+  public static final AttributeKey<String> MESSAGING_DESTINATION_NAME =
+      stringKey("messaging.destination.name");
+
+  /** The name of the channel */
+  public static final AttributeKey<String> IBM_MQ_CHANNEL_NAME = stringKey("ibm.mq.channel.name");
+
+  /** The type of the channel */
+  public static final AttributeKey<String> IBM_MQ_CHANNEL_TYPE = stringKey("ibm.mq.channel.type");
+
+  /** The job name */
+  public static final AttributeKey<String> IBM_MQ_JOB_NAME = stringKey("ibm.mq.job.name");
+
+  /** The start time of the channel as seconds since Epoch.
*/
+  public static final AttributeKey<Long> IBM_MQ_CHANNEL_START_TIME =
+      longKey("ibm.mq.channel.start.time");
+
+  /** The queue type */
+  public static final AttributeKey<String> IBM_MQ_QUEUE_TYPE = stringKey("ibm.mq.queue.type");
+
+  /** The listener name */
+  public static final AttributeKey<String> IBM_MQ_LISTENER_NAME = stringKey("ibm.mq.listener.name");
+
+  /** Short name or login/username of the user. */
+  public static final AttributeKey<String> USER_NAME = stringKey("user.name");
+
+  /** Logical name of the service. */
+  public static final AttributeKey<String> SERVICE_NAME = stringKey("service.name");
+
+  /** The reason code associated with an error */
+  public static final AttributeKey<String> ERROR_CODE = stringKey("error.code");
+
+  private IbmMqAttributes() {}
+}
diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metrics/Metrics.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metrics/Metrics.java
new file mode 100644
index 000000000..3698c87c9
--- /dev/null
+++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metrics/Metrics.java
@@ -0,0 +1,432 @@
+/*
+ * Copyright The OpenTelemetry Authors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package io.opentelemetry.ibm.mq.metrics;
+
+import io.opentelemetry.api.metrics.LongCounter;
+import io.opentelemetry.api.metrics.LongGauge;
+import io.opentelemetry.api.metrics.Meter;
+import java.util.function.Function;
+
+// This file is generated using weaver. Do not edit manually.
+
+/** Metric definitions generated from a Weaver model. Do not edit manually. */
+public final class Metrics {
+  public static final Function<Long, Long> MIBY_TO_BYTES = x -> x * 1024L * 1024L;
+
+  private Metrics() {}
+
+  public static LongGauge createIbmMqMessageRetryCount(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.message.retry.count")
+        .ofLongs()
+        .setUnit("{message}")
+        .setDescription("Number of message retries")
+        .build();
+  }
+
+  public static LongGauge createIbmMqStatus(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.status")
+        .ofLongs()
+        .setUnit("1")
+        .setDescription("Channel status")
+        .build();
+  }
+
+  public static LongGauge createIbmMqMaxSharingConversations(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.max.sharing.conversations")
+        .ofLongs()
+        .setUnit("{conversation}")
+        .setDescription("Maximum number of conversations permitted on this channel instance.")
+        .build();
+  }
+
+  public static LongGauge createIbmMqCurrentSharingConversations(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.current.sharing.conversations")
+        .ofLongs()
+        .setUnit("{conversation}")
+        .setDescription("Current number of conversations permitted on this channel instance.")
+        .build();
+  }
+
+  public static LongGauge createIbmMqByteReceived(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.byte.received")
+        .ofLongs()
+        .setUnit("By")
+        .setDescription("Number of bytes received")
+        .build();
+  }
+
+  public static LongGauge createIbmMqByteSent(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.byte.sent")
+        .ofLongs()
+        .setUnit("By")
+        .setDescription("Number of bytes sent")
+        .build();
+  }
+
+  public static LongGauge createIbmMqBuffersReceived(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.buffers.received")
+        .ofLongs()
+        .setUnit("{buffer}")
+        .setDescription("Buffers received")
+        .build();
+  }
+
+  public static LongGauge createIbmMqBuffersSent(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.buffers.sent")
+        .ofLongs()
+        .setUnit("{buffer}")
+        .setDescription("Buffers sent")
+        .build();
+  }
+
+  public static LongGauge createIbmMqMessageCount(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.message.count")
+        .ofLongs()
+        .setUnit("{message}")
+        .setDescription("Message count")
+        .build();
+  }
+
+  public static LongGauge createIbmMqOpenInputCount(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.open.input.count")
+        .ofLongs()
+        .setUnit("{application}")
+        .setDescription("Count of applications sending messages to the queue")
+        .build();
+  }
+
+  public static LongGauge createIbmMqOpenOutputCount(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.open.output.count")
+        .ofLongs()
+        .setUnit("{application}")
+        .setDescription("Count of applications consuming messages from the queue")
+        .build();
+  }
+
+  public static LongGauge createIbmMqHighQueueDepth(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.high.queue.depth")
+        .ofLongs()
+        .setUnit("{percent}")
+        .setDescription("The current high queue depth")
+        .build();
+  }
+
+  public static LongGauge createIbmMqServiceInterval(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.service.interval")
+        .ofLongs()
+        .setUnit("{percent}")
+        .setDescription("The queue service interval")
+        .build();
+  }
+
+  public static LongCounter createIbmMqQueueDepthFullEvent(Meter meter) {
+    return meter
+        .counterBuilder("ibm.mq.queue.depth.full.event")
+        .setUnit("{event}")
+        .setDescription("The number of full queue events")
+        .build();
+  }
+
+  public static LongCounter createIbmMqQueueDepthHighEvent(Meter meter) {
+    return meter
+        .counterBuilder("ibm.mq.queue.depth.high.event")
+        .setUnit("{event}")
+        .setDescription("The number of high queue events")
+        .build();
+  }
+
+  public static LongCounter createIbmMqQueueDepthLowEvent(Meter meter) {
+    return meter
+        .counterBuilder("ibm.mq.queue.depth.low.event")
+        .setUnit("{event}")
+        .setDescription("The number of low queue events")
+        .build();
+  }
+
+  public static LongGauge createIbmMqUncommittedMessages(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.uncommitted.messages")
+        .ofLongs()
+        .setUnit("{message}")
+        .setDescription("Number of uncommitted messages")
+        .build();
+  }
+
+  public static LongGauge createIbmMqOldestMsgAge(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.oldest.msg.age")
+        .ofLongs()
+        .setUnit("microseconds")
+        .setDescription("Queue message oldest age")
+        .build();
+  }
+
+  public static LongGauge createIbmMqCurrentMaxQueueFilesize(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.current.max.queue.filesize")
+        .ofLongs()
+        .setUnit("By")
+        .setDescription("Current maximum queue file size")
+        .build();
+  }
+
+  public static LongGauge createIbmMqCurrentQueueFilesize(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.current.queue.filesize")
+        .ofLongs()
+        .setUnit("By")
+        .setDescription("Current queue file size")
+        .build();
+  }
+
+  public static LongGauge createIbmMqInstancesPerClient(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.instances.per.client")
+        .ofLongs()
+        .setUnit("{instance}")
+        .setDescription("Instances per client")
+        .build();
+  }
+
+  public static LongGauge createIbmMqMessageDeqCount(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.message.deq.count")
+        .ofLongs()
+        .setUnit("{message}")
+        .setDescription("Message dequeue count")
+        .build();
+  }
+
+  public static LongGauge createIbmMqMessageEnqCount(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.message.enq.count")
+        .ofLongs()
+        .setUnit("{message}")
+        .setDescription("Message enqueue count")
+        .build();
+  }
+
+  public static LongGauge createIbmMqQueueDepth(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.queue.depth")
+        .ofLongs()
+        .setUnit("{message}")
+        .setDescription("Current queue depth")
+        .build();
+  }
+
+  public static LongGauge createIbmMqServiceIntervalEvent(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.service.interval.event")
+        .ofLongs()
+        .setUnit("1")
+        .setDescription("Queue service interval event")
+        .build();
+  }
+
+  public static LongGauge createIbmMqReusableLogSize(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.reusable.log.size")
+        .ofLongs()
+        .setUnit("By")
+        .setDescription(
+            "The amount of space occupied, in megabytes, by log extents available to be reused.")
+        .build();
+  }
+
+  public static LongGauge createIbmMqManagerActiveChannels(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.manager.active.channels")
+        .ofLongs()
+        .setUnit("{channel}")
+        .setDescription("The queue manager active maximum channels limit")
+        .build();
+  }
+
+  public static LongGauge createIbmMqRestartLogSize(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.restart.log.size")
+        .ofLongs()
+        .setUnit("By")
+        .setDescription("Size of the log data required for restart recovery in megabytes.")
+        .build();
+  }
+
+  public static LongGauge createIbmMqMaxQueueDepth(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.max.queue.depth")
+        .ofLongs()
+        .setUnit("{message}")
+        .setDescription("Maximum queue depth")
+        .build();
+  }
+
+  public static LongGauge createIbmMqOnqtimeShortPeriod(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.onqtime.short_period")
+        .ofLongs()
+        .setUnit("microseconds")
+        .setDescription(
+            "Amount of time, in microseconds, that a message spent on the queue, over a short period")
+        .build();
+  }
+
+  public static LongGauge createIbmMqOnqtimeLongPeriod(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.onqtime.long_period")
+        .ofLongs()
+        .setUnit("microseconds")
+        .setDescription(
+            "Amount of time, in microseconds, that a message spent on the queue, over a longer period")
+        .build();
+  }
+
+  public static LongGauge createIbmMqMessageReceivedCount(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.message.received.count")
+        .ofLongs()
+        .setUnit("{message}")
+        .setDescription("Number of messages received")
+        .build();
+  }
+
+  public static LongGauge createIbmMqMessageSentCount(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.message.sent.count")
+        .ofLongs()
+        .setUnit("{message}")
+        .setDescription("Number of messages sent")
+        .build();
+  }
+
+  public static LongGauge createIbmMqMaxInstances(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.max.instances")
+        .ofLongs()
+        .setUnit("{instance}")
+        .setDescription("Max channel instances")
+        .build();
+  }
+
+  public static LongGauge createIbmMqConnectionCount(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.connection.count")
+        .ofLongs()
+        .setUnit("{connection}")
+        .setDescription("Active connections count")
+        .build();
+  }
+
+  public static LongGauge createIbmMqManagerStatus(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.manager.status")
+        .ofLongs()
+        .setUnit("1")
+        .setDescription("Queue manager status")
+        .build();
+  }
+
+  public static LongGauge createIbmMqHeartbeat(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.heartbeat")
+        .ofLongs()
+        .setUnit("1")
+        .setDescription("Queue manager heartbeat")
+        .build();
+  }
+
+  public static LongGauge createIbmMqArchiveLogSize(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.archive.log.size")
+        .ofLongs()
+        .setUnit("By")
+        .setDescription("Queue manager archive log size")
+        .build();
+  }
+
+  public static LongGauge createIbmMqManagerMaxActiveChannels(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.manager.max.active.channels")
+        .ofLongs()
+        .setUnit("{channel}")
+        .setDescription("Queue manager max active channels")
+        .build();
+  }
+
+  public static LongGauge createIbmMqManagerStatisticsInterval(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.manager.statistics.interval")
+        .ofLongs()
+        .setUnit("1")
+        .setDescription("Queue manager statistics interval")
+        .build();
+  }
+
+  public static LongGauge createIbmMqPublishCount(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.publish.count")
+        .ofLongs()
+        .setUnit("{publication}")
+        .setDescription("Topic publication count")
+        .build();
+  }
+
+  public static LongGauge createIbmMqSubscriptionCount(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.subscription.count")
+        .ofLongs()
+        .setUnit("{subscription}")
+        .setDescription("Topic subscription count")
+        .build();
+  }
+
+  public static LongGauge createIbmMqListenerStatus(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.listener.status")
+        .ofLongs()
+        .setUnit("1")
+        .setDescription("Listener status")
+        .build();
+  }
+
+  public static LongCounter createIbmMqUnauthorizedEvent(Meter meter) {
+    return meter
+        .counterBuilder("ibm.mq.unauthorized.event")
+        .setUnit("{event}")
+        .setDescription("Number of authentication error events")
+        .build();
+  }
+
+  public static LongGauge createIbmMqManagerMaxHandles(Meter meter) {
+    return meter
+        .gaugeBuilder("ibm.mq.manager.max.handles")
+        .ofLongs()
+        .setUnit("{event}")
+        .setDescription("Max open handles")
+        .build();
+  }
+
+  public static LongCounter createIbmMqConnectionErrors(Meter meter) {
+    return meter
+        .counterBuilder("ibm.mq.connection.errors")
+        .setUnit("{errors}")
+        .setDescription("Number of connection errors")
+        .build();
+  }
+}
diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metrics/MetricsConfig.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metrics/MetricsConfig.java
new file mode 100644
index 000000000..acc975f8f
--- /dev/null
+++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metrics/MetricsConfig.java
@@ -0,0 +1,217 @@
+/*
+ * Copyright The OpenTelemetry Authors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package io.opentelemetry.ibm.mq.metrics;
+
+import io.opentelemetry.ibm.mq.opentelemetry.ConfigWrapper;
+import java.util.Map;
+
+// This file is generated using weaver. Do not edit manually.
+
+/** Configuration of metrics as defined in config.yml.
*/
+public final class MetricsConfig {
+
+  private final Map<String, ?> config;
+
+  public MetricsConfig(ConfigWrapper config) {
+    this.config = config.getMetrics();
+  }
+
+  public boolean isIbmMqMessageRetryCountEnabled() {
+    return isEnabled("ibm.mq.message.retry.count");
+  }
+
+  public boolean isIbmMqStatusEnabled() {
+    return isEnabled("ibm.mq.status");
+  }
+
+  public boolean isIbmMqMaxSharingConversationsEnabled() {
+    return isEnabled("ibm.mq.max.sharing.conversations");
+  }
+
+  public boolean isIbmMqCurrentSharingConversationsEnabled() {
+    return isEnabled("ibm.mq.current.sharing.conversations");
+  }
+
+  public boolean isIbmMqByteReceivedEnabled() {
+    return isEnabled("ibm.mq.byte.received");
+  }
+
+  public boolean isIbmMqByteSentEnabled() {
+    return isEnabled("ibm.mq.byte.sent");
+  }
+
+  public boolean isIbmMqBuffersReceivedEnabled() {
+    return isEnabled("ibm.mq.buffers.received");
+  }
+
+  public boolean isIbmMqBuffersSentEnabled() {
+    return isEnabled("ibm.mq.buffers.sent");
+  }
+
+  public boolean isIbmMqMessageCountEnabled() {
+    return isEnabled("ibm.mq.message.count");
+  }
+
+  public boolean isIbmMqOpenInputCountEnabled() {
+    return isEnabled("ibm.mq.open.input.count");
+  }
+
+  public boolean isIbmMqOpenOutputCountEnabled() {
+    return isEnabled("ibm.mq.open.output.count");
+  }
+
+  public boolean isIbmMqHighQueueDepthEnabled() {
+    return isEnabled("ibm.mq.high.queue.depth");
+  }
+
+  public boolean isIbmMqServiceIntervalEnabled() {
+    return isEnabled("ibm.mq.service.interval");
+  }
+
+  public boolean isIbmMqQueueDepthFullEventEnabled() {
+    return isEnabled("ibm.mq.queue.depth.full.event");
+  }
+
+  public boolean isIbmMqQueueDepthHighEventEnabled() {
+    return isEnabled("ibm.mq.queue.depth.high.event");
+  }
+
+  public boolean isIbmMqQueueDepthLowEventEnabled() {
+    return isEnabled("ibm.mq.queue.depth.low.event");
+  }
+
+  public boolean isIbmMqUncommittedMessagesEnabled() {
+    return isEnabled("ibm.mq.uncommitted.messages");
+  }
+
+  public boolean isIbmMqOldestMsgAgeEnabled() {
+    return isEnabled("ibm.mq.oldest.msg.age");
+  }
+
+  public boolean isIbmMqCurrentMaxQueueFilesizeEnabled() {
+    return isEnabled("ibm.mq.current.max.queue.filesize");
+  }
+
+  public boolean isIbmMqCurrentQueueFilesizeEnabled() {
+    return isEnabled("ibm.mq.current.queue.filesize");
+  }
+
+  public boolean isIbmMqInstancesPerClientEnabled() {
+    return isEnabled("ibm.mq.instances.per.client");
+  }
+
+  public boolean isIbmMqMessageDeqCountEnabled() {
+    return isEnabled("ibm.mq.message.deq.count");
+  }
+
+  public boolean isIbmMqMessageEnqCountEnabled() {
+    return isEnabled("ibm.mq.message.enq.count");
+  }
+
+  public boolean isIbmMqQueueDepthEnabled() {
+    return isEnabled("ibm.mq.queue.depth");
+  }
+
+  public boolean isIbmMqServiceIntervalEventEnabled() {
+    return isEnabled("ibm.mq.service.interval.event");
+  }
+
+  public boolean isIbmMqReusableLogSizeEnabled() {
+    return isEnabled("ibm.mq.reusable.log.size");
+  }
+
+  public boolean isIbmMqManagerActiveChannelsEnabled() {
+    return isEnabled("ibm.mq.manager.active.channels");
+  }
+
+  public boolean isIbmMqRestartLogSizeEnabled() {
+    return isEnabled("ibm.mq.restart.log.size");
+  }
+
+  public boolean isIbmMqMaxQueueDepthEnabled() {
+    return isEnabled("ibm.mq.max.queue.depth");
+  }
+
+  public boolean isIbmMqOnqtimeShortPeriodEnabled() {
+    return isEnabled("ibm.mq.onqtime.short_period");
+  }
+
+  public boolean isIbmMqOnqtimeLongPeriodEnabled() {
+    return isEnabled("ibm.mq.onqtime.long_period");
+  }
+
+  public boolean isIbmMqMessageReceivedCountEnabled() {
+    return isEnabled("ibm.mq.message.received.count");
+  }
+
+  public boolean isIbmMqMessageSentCountEnabled() {
+    return isEnabled("ibm.mq.message.sent.count");
+  }
+
+  public boolean isIbmMqMaxInstancesEnabled() {
+    return isEnabled("ibm.mq.max.instances");
+  }
+
+  public boolean isIbmMqConnectionCountEnabled() {
+    return isEnabled("ibm.mq.connection.count");
+  }
+
+  public boolean isIbmMqManagerStatusEnabled() {
+    return isEnabled("ibm.mq.manager.status");
+  }
+
+  public boolean isIbmMqHeartbeatEnabled() {
+    return isEnabled("ibm.mq.heartbeat");
+  }
+
+  public boolean isIbmMqArchiveLogSizeEnabled() {
+    return isEnabled("ibm.mq.archive.log.size");
+  }
+
+  public boolean isIbmMqManagerMaxActiveChannelsEnabled() {
+    return isEnabled("ibm.mq.manager.max.active.channels");
+  }
+
+  public boolean isIbmMqManagerStatisticsIntervalEnabled() {
+    return isEnabled("ibm.mq.manager.statistics.interval");
+  }
+
+  public boolean isIbmMqPublishCountEnabled() {
+    return isEnabled("ibm.mq.publish.count");
+  }
+
+  public boolean isIbmMqSubscriptionCountEnabled() {
+    return isEnabled("ibm.mq.subscription.count");
+  }
+
+  public boolean isIbmMqListenerStatusEnabled() {
+    return isEnabled("ibm.mq.listener.status");
+  }
+
+  public boolean isIbmMqUnauthorizedEventEnabled() {
+    return isEnabled("ibm.mq.unauthorized.event");
+  }
+
+  public boolean isIbmMqManagerMaxHandlesEnabled() {
+    return isEnabled("ibm.mq.manager.max.handles");
+  }
+
+  public boolean isIbmMqConnectionErrorsEnabled() {
+    return isEnabled("ibm.mq.connection.errors");
+  }
+
+  private boolean isEnabled(String key) {
+    Object metricInfo = config.get(key);
+    if (!(metricInfo instanceof Map)) {
+      return false;
+    }
+    Object enabled = ((Map<?, ?>) metricInfo).get("enabled");
+    if (enabled instanceof Boolean) {
+      return (Boolean) enabled;
+    }
+    return false;
+  }
+}
diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/ChannelMetricsCollector.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/ChannelMetricsCollector.java
new file mode 100644
index 000000000..44afdb28a
--- /dev/null
+++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/ChannelMetricsCollector.java
@@ -0,0 +1,231 @@
+/*
+ * Copyright The OpenTelemetry Authors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package io.opentelemetry.ibm.mq.metricscollector;
+
+import static
com.ibm.mq.constants.CMQC.MQRC_SELECTOR_ERROR; +import static com.ibm.mq.constants.CMQCFC.MQRCCF_CHL_STATUS_NOT_FOUND; +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.IBM_MQ_CHANNEL_NAME; +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.IBM_MQ_CHANNEL_START_TIME; +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.IBM_MQ_CHANNEL_TYPE; +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.IBM_MQ_JOB_NAME; +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.IBM_MQ_QUEUE_MANAGER; + +import com.ibm.mq.constants.CMQC; +import com.ibm.mq.constants.CMQCFC; +import com.ibm.mq.headers.pcf.PCFException; +import com.ibm.mq.headers.pcf.PCFMessage; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.LongGauge; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.ibm.mq.metrics.Metrics; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Set; +import java.util.function.Consumer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** This class is responsible for channel metric collection. 
*/ +public final class ChannelMetricsCollector implements Consumer { + + private static final Logger logger = LoggerFactory.getLogger(ChannelMetricsCollector.class); + + private final LongGauge activeChannelsGauge; + private final LongGauge channelStatusGauge; + private final LongGauge messageCountGauge; + private final LongGauge byteSentGauge; + private final LongGauge byteReceivedGauge; + private final LongGauge buffersSentGauge; + private final LongGauge buffersReceivedGauge; + private final LongGauge currentSharingConvsGauge; + private final LongGauge maxSharingConvsGauge; + + /* + * The Channel Status values are mentioned here http://www.ibm.com/support/knowledgecenter/SSFKSJ_7.5.0/com.ibm.mq.ref.dev.doc/q090880_.htm + */ + public ChannelMetricsCollector(Meter meter) { + this.activeChannelsGauge = Metrics.createIbmMqManagerActiveChannels(meter); + this.channelStatusGauge = Metrics.createIbmMqStatus(meter); + this.messageCountGauge = Metrics.createIbmMqMessageCount(meter); + this.byteSentGauge = Metrics.createIbmMqByteSent(meter); + this.byteReceivedGauge = Metrics.createIbmMqByteReceived(meter); + this.buffersSentGauge = Metrics.createIbmMqBuffersSent(meter); + this.buffersReceivedGauge = Metrics.createIbmMqBuffersReceived(meter); + this.currentSharingConvsGauge = Metrics.createIbmMqCurrentSharingConversations(meter); + this.maxSharingConvsGauge = Metrics.createIbmMqMaxSharingConversations(meter); + } + + @Override + public void accept(MetricsCollectorContext context) { + logger.info("Collecting metrics for command MQCMD_INQUIRE_CHANNEL_STATUS"); + long entryTime = System.currentTimeMillis(); + + int[] attrs = + new int[] { + CMQCFC.MQCACH_CHANNEL_NAME, + CMQCFC.MQCACH_CONNECTION_NAME, + CMQCFC.MQIACH_CHANNEL_TYPE, + CMQCFC.MQIACH_MSGS, + CMQCFC.MQIACH_CHANNEL_STATUS, + CMQCFC.MQIACH_BYTES_SENT, + CMQCFC.MQIACH_BYTES_RECEIVED, + CMQCFC.MQIACH_BUFFERS_SENT, + CMQCFC.MQIACH_BUFFERS_RECEIVED, + CMQCFC.MQIACH_CURRENT_SHARING_CONVS, + 
CMQCFC.MQIACH_MAX_SHARING_CONVS, + CMQCFC.MQCACH_CHANNEL_START_DATE, + CMQCFC.MQCACH_CHANNEL_START_TIME, + CMQCFC.MQCACH_MCA_JOB_NAME + }; + if (logger.isDebugEnabled()) { + logger.debug( + "Attributes being sent along PCF agent request to query channel metrics: {}", + Arrays.toString(attrs)); + } + + Set channelGenericNames = context.getChannelIncludeFilterNames(); + + // + // The MQCMD_INQUIRE_CHANNEL_STATUS command queries the current operational status of channels. + // This includes information about whether a channel is running, stopped, or in another state, + // as well as details about the channel’s performance and usage. + List activeChannels = new ArrayList<>(); + for (String channelGenericName : channelGenericNames) { + PCFMessage request = new PCFMessage(CMQCFC.MQCMD_INQUIRE_CHANNEL_STATUS); + request.addParameter(CMQCFC.MQCACH_CHANNEL_NAME, channelGenericName); + request.addParameter(CMQCFC.MQIACH_CHANNEL_INSTANCE_TYPE, CMQC.MQOT_CURRENT_CHANNEL); + request.addParameter(CMQCFC.MQIACH_CHANNEL_INSTANCE_ATTRS, attrs); + try { + logger.debug( + "sending PCF agent request to query metrics for generic channel {}", + channelGenericName); + long startTime = System.currentTimeMillis(); + List response = context.send(request); + long endTime = System.currentTimeMillis() - startTime; + logger.debug( + "PCF agent queue metrics query response for generic queue {} received in {} milliseconds", + channelGenericName, + endTime); + if (response.isEmpty()) { + logger.debug("Unexpected error while PCFMessage.send(), response is empty"); + return; + } + + List messages = + MessageFilter.ofKind("channel") + .excluding(context.getChannelExcludeFilters()) + .withResourceExtractor(MessageBuddy::channelName) + .filter(response); + + for (PCFMessage message : messages) { + String channelName = MessageBuddy.channelName(message); + String channelType = MessageBuddy.channelType(message); + long channelStartTime = MessageBuddy.channelStartTime(message); + String jobName = 
MessageBuddy.jobName(message); + + logger.debug("Pulling out metrics for channel name {}", channelName); + updateMetrics( + context, + message, + channelName, + channelType, + channelStartTime, + jobName, + activeChannels); + } + } catch (PCFException pcfe) { + if (pcfe.getReason() == MQRCCF_CHL_STATUS_NOT_FOUND) { + String errorMsg = "Channel- " + channelGenericName + " :"; + errorMsg += + "Could not collect channel information as channel is stopped or inactive: Reason '3065'\n"; + errorMsg += + "If the channel type is MQCHT_RECEIVER, MQCHT_SVRCONN or MQCHT_CLUSRCVR, then the only action is to enable the channel, not start it."; + logger.error(errorMsg, pcfe); + } else if (pcfe.getReason() == MQRC_SELECTOR_ERROR) { + logger.error( + "Invalid metrics passed while collecting channel metrics, check config.yaml: Reason '2067'", + pcfe); + } else { + logger.error(pcfe.getMessage(), pcfe); + } + } catch (Exception e) { + logger.error( + "Unexpected error occurred while collecting metrics for channel " + channelGenericName, + e); + } + } + + logger.info( + "Active Channels in queueManager {} are {}", context.getQueueManagerName(), activeChannels); + activeChannelsGauge.set( + activeChannels.size(), Attributes.of(IBM_MQ_QUEUE_MANAGER, context.getQueueManagerName())); + + long exitTime = System.currentTimeMillis() - entryTime; + logger.debug("Time taken to publish metrics for all channels is {} milliseconds", exitTime); + } + + private void updateMetrics( + MetricsCollectorContext context, + PCFMessage message, + String channelName, + String channelType, + long channelStartTime, + String jobName, + List activeChannels) + throws PCFException { + Attributes attributes = + Attributes.builder() + .put(IBM_MQ_CHANNEL_NAME, channelName) + .put(IBM_MQ_CHANNEL_TYPE, channelType) + .put(IBM_MQ_QUEUE_MANAGER, context.getQueueManagerName()) + .put(IBM_MQ_CHANNEL_START_TIME, channelStartTime) + .put(IBM_MQ_JOB_NAME, jobName) + .build(); + if 
(context.getMetricsConfig().isIbmMqMessageCountEnabled()) { + int received = message.getIntParameterValue(CMQCFC.MQIACH_MSGS); + messageCountGauge.set(received, attributes); + } + int status = message.getIntParameterValue(CMQCFC.MQIACH_CHANNEL_STATUS); + if (context.getMetricsConfig().isIbmMqStatusEnabled()) { + channelStatusGauge.set(status, attributes); + } + // We follow the definition of active channel as documented in + // https://www.ibm.com/docs/en/ibm-mq/9.2.x?topic=states-current-active + if (status != CMQCFC.MQCHS_RETRYING + && status != CMQCFC.MQCHS_STOPPED + && status != CMQCFC.MQCHS_STARTING) { + activeChannels.add(channelName); + } + if (context.getMetricsConfig().isIbmMqByteSentEnabled()) { + byteSentGauge.set(message.getIntParameterValue(CMQCFC.MQIACH_BYTES_SENT), attributes); + } + if (context.getMetricsConfig().isIbmMqByteReceivedEnabled()) { + byteReceivedGauge.set(message.getIntParameterValue(CMQCFC.MQIACH_BYTES_RECEIVED), attributes); + } + if (context.getMetricsConfig().isIbmMqBuffersSentEnabled()) { + buffersSentGauge.set(message.getIntParameterValue(CMQCFC.MQIACH_BUFFERS_SENT), attributes); + } + if (context.getMetricsConfig().isIbmMqBuffersReceivedEnabled()) { + buffersReceivedGauge.set( + message.getIntParameterValue(CMQCFC.MQIACH_BUFFERS_RECEIVED), attributes); + } + if (context.getMetricsConfig().isIbmMqCurrentSharingConversationsEnabled()) { + int currentSharingConvs = 0; + if (message.getParameter(CMQCFC.MQIACH_CURRENT_SHARING_CONVS) != null) { + currentSharingConvs = message.getIntParameterValue(CMQCFC.MQIACH_CURRENT_SHARING_CONVS); + } + currentSharingConvsGauge.set(currentSharingConvs, attributes); + } + if (context.getMetricsConfig().isIbmMqMaxSharingConversationsEnabled()) { + int maxSharingConvs = 0; + if (message.getParameter(CMQCFC.MQIACH_MAX_SHARING_CONVS) != null) { + maxSharingConvs = message.getIntParameterValue(CMQCFC.MQIACH_MAX_SHARING_CONVS); + } + maxSharingConvsGauge.set(maxSharingConvs, attributes); + } + } +} diff 
--git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/FilterType.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/FilterType.java new file mode 100644 index 000000000..668ba2ac0 --- /dev/null +++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/FilterType.java @@ -0,0 +1,14 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.metricscollector; + +public enum FilterType { + STARTSWITH, + EQUALS, + ENDSWITH, + CONTAINS, + NONE +} diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/InquireChannelCmdCollector.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/InquireChannelCmdCollector.java new file mode 100644 index 000000000..b0dc7286c --- /dev/null +++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/InquireChannelCmdCollector.java @@ -0,0 +1,149 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.metricscollector; + +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.IBM_MQ_CHANNEL_NAME; +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.IBM_MQ_CHANNEL_TYPE; +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.IBM_MQ_QUEUE_MANAGER; + +import com.ibm.mq.constants.CMQCFC; +import com.ibm.mq.constants.MQConstants; +import com.ibm.mq.headers.pcf.MQCFIL; +import com.ibm.mq.headers.pcf.PCFException; +import com.ibm.mq.headers.pcf.PCFMessage; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.LongGauge; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.ibm.mq.metrics.Metrics; +import java.util.List; +import java.util.Set; +import java.util.function.Consumer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** This class is responsible for channel inquiry metric collection. 
*/ +public final class InquireChannelCmdCollector implements Consumer { + + public static final Logger logger = LoggerFactory.getLogger(InquireChannelCmdCollector.class); + private final LongGauge maxClientsGauge; + private final LongGauge instancesPerClientGauge; + private final LongGauge messageRetryCountGauge; + private final LongGauge messageReceivedCountGauge; + private final LongGauge messageSentCountGauge; + + public InquireChannelCmdCollector(Meter meter) { + this.maxClientsGauge = Metrics.createIbmMqMaxInstances(meter); + this.instancesPerClientGauge = Metrics.createIbmMqInstancesPerClient(meter); + this.messageRetryCountGauge = Metrics.createIbmMqMessageRetryCount(meter); + this.messageReceivedCountGauge = Metrics.createIbmMqMessageReceivedCount(meter); + this.messageSentCountGauge = Metrics.createIbmMqMessageSentCount(meter); + } + + @Override + public void accept(MetricsCollectorContext context) { + long entryTime = System.currentTimeMillis(); + + Set channelGenericNames = context.getChannelIncludeFilterNames(); + + for (String channelGenericName : channelGenericNames) { + PCFMessage request = new PCFMessage(CMQCFC.MQCMD_INQUIRE_CHANNEL); + request.addParameter(CMQCFC.MQCACH_CHANNEL_NAME, channelGenericName); + request.addParameter( + new MQCFIL(MQConstants.MQIACF_CHANNEL_ATTRS, new int[] {MQConstants.MQIACF_ALL})); + try { + logger.debug( + "sending PCF agent request to query metrics for generic channel {}", + channelGenericName); + long startTime = System.currentTimeMillis(); + List response = context.send(request); + long endTime = System.currentTimeMillis() - startTime; + logger.debug( + "PCF agent queue metrics query response for generic queue {} received in {} milliseconds", + channelGenericName, + endTime); + if (response.isEmpty()) { + logger.warn("Unexpected error while PCFMessage.send(), response is empty"); + return; + } + + List messages = + MessageFilter.ofKind("channel") + .excluding(context.getChannelExcludeFilters()) + 
.withResourceExtractor(MessageBuddy::channelName) + .filter(response); + + for (PCFMessage message : messages) { + String channelName = MessageBuddy.channelName(message); + String channelType = MessageBuddy.channelType(message); + logger.debug("Pulling out metrics for channel name {}", channelName); + updateMetrics(message, channelName, channelType, context); + } + } catch (PCFException pcfe) { + if (pcfe.getReason() == MQConstants.MQRCCF_CHL_STATUS_NOT_FOUND) { + String errorMsg = "Channel- " + channelGenericName + " :"; + errorMsg += + "Could not collect channel information as channel is stopped or inactive: Reason '3065'\n"; + errorMsg += + "If the channel type is MQCHT_RECEIVER, MQCHT_SVRCONN or MQCHT_CLUSRCVR, then the only action is to enable the channel, not start it."; + logger.error(errorMsg, pcfe); + } else if (pcfe.getReason() == MQConstants.MQRC_SELECTOR_ERROR) { + logger.error( + "Invalid metrics passed while collecting channel metrics, check config.yaml: Reason '2067'", + pcfe); + } + logger.error(pcfe.getMessage(), pcfe); + } catch (Exception e) { + logger.error( + "Unexpected error while collecting metrics for channel " + channelGenericName, e); + } + } + + long exitTime = System.currentTimeMillis() - entryTime; + logger.debug("Time taken to publish metrics for all channels is {} milliseconds", exitTime); + } + + private void updateMetrics( + PCFMessage message, String channelName, String channelType, MetricsCollectorContext context) + throws PCFException { + Attributes attributes = + Attributes.builder() + .put(IBM_MQ_CHANNEL_NAME, channelName) + .put(IBM_MQ_CHANNEL_TYPE, channelType) + .put(IBM_MQ_QUEUE_MANAGER, context.getQueueManagerName()) + .build(); + if (context.getMetricsConfig().isIbmMqMaxInstancesEnabled() + && message.getParameter(CMQCFC.MQIACH_MAX_INSTANCES) != null) { + this.maxClientsGauge.set( + message.getIntParameterValue(CMQCFC.MQIACH_MAX_INSTANCES), attributes); + } + if 
(context.getMetricsConfig().isIbmMqInstancesPerClientEnabled() + && message.getParameter(CMQCFC.MQIACH_MAX_INSTS_PER_CLIENT) != null) { + this.instancesPerClientGauge.set( + message.getIntParameterValue(CMQCFC.MQIACH_MAX_INSTS_PER_CLIENT), attributes); + } + if (context.getMetricsConfig().isIbmMqMessageRetryCountEnabled()) { + int count = 0; + if (message.getParameter(CMQCFC.MQIACH_MR_COUNT) != null) { + count = message.getIntParameterValue(CMQCFC.MQIACH_MR_COUNT); + } + this.messageRetryCountGauge.set(count, attributes); + } + if (context.getMetricsConfig().isIbmMqInstancesPerClientEnabled()) { + int received = 0; + if (message.getParameter(CMQCFC.MQIACH_MSGS_RECEIVED) != null) { + received = message.getIntParameterValue(CMQCFC.MQIACH_MSGS_RECEIVED); + } + this.messageReceivedCountGauge.set(received, attributes); + } + if (context.getMetricsConfig().isIbmMqMessageSentCountEnabled()) { + int sent = 0; + if (message.getParameter(CMQCFC.MQIACH_MSGS_SENT) != null) { + sent = message.getIntParameterValue(CMQCFC.MQIACH_MSGS_SENT); + } + this.messageSentCountGauge.set(sent, attributes); + } + } +} diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/InquireQCmdCollector.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/InquireQCmdCollector.java new file mode 100644 index 000000000..16164b46d --- /dev/null +++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/InquireQCmdCollector.java @@ -0,0 +1,69 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.metricscollector; + +import com.ibm.mq.constants.CMQC; +import com.ibm.mq.constants.CMQCFC; +import com.ibm.mq.headers.pcf.PCFMessage; +import java.util.Arrays; +import java.util.Set; +import java.util.function.Consumer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +final class InquireQCmdCollector implements Consumer { + + private static final Logger logger = 
LoggerFactory.getLogger(InquireQCmdCollector.class); + + static final int[] ATTRIBUTES = + new int[] { + CMQC.MQCA_Q_NAME, + CMQC.MQIA_USAGE, + CMQC.MQIA_Q_TYPE, + CMQC.MQIA_CURRENT_Q_DEPTH, + CMQC.MQIA_MAX_Q_DEPTH, + CMQC.MQIA_OPEN_INPUT_COUNT, + CMQC.MQIA_OPEN_OUTPUT_COUNT, + CMQC.MQIA_Q_SERVICE_INTERVAL, + CMQC.MQIA_Q_SERVICE_INTERVAL_EVENT + }; + + static final String COMMAND = "MQCMD_INQUIRE_Q"; + private final QueueCollectionBuddy queueBuddy; + + public InquireQCmdCollector(QueueCollectionBuddy queueBuddy) { + this.queueBuddy = queueBuddy; + } + + @Override + public void accept(MetricsCollectorContext context) { + logger.info("Collecting metrics for command {}", COMMAND); + long entryTime = System.currentTimeMillis(); + + logger.debug( + "Attributes being sent along PCF agent request to query queue metrics: {} for command {}", + Arrays.toString(ATTRIBUTES), + COMMAND); + + Set queueGenericNames = context.getQueueIncludeFilterNames(); + for (String queueGenericName : queueGenericNames) { + // list of all metrics extracted through MQCMD_INQUIRE_Q is mentioned here + // https://www.ibm.com/support/knowledgecenter/SSFKSJ_7.5.0/com.ibm.mq.ref.adm.doc/q087810_.htm + PCFMessage request = new PCFMessage(CMQCFC.MQCMD_INQUIRE_Q); + request.addParameter(CMQC.MQCA_Q_NAME, queueGenericName); + request.addParameter(CMQC.MQIA_Q_TYPE, CMQC.MQQT_ALL); + request.addParameter(CMQCFC.MQIACF_Q_ATTRS, ATTRIBUTES); + + queueBuddy.processPcfRequestAndPublishQMetrics( + context, request, queueGenericName, ATTRIBUTES); + } + long exitTime = System.currentTimeMillis() - entryTime; + logger.debug( + "Time taken to publish metrics for all queues is {} milliseconds for command {}", + exitTime, + COMMAND); + } +} diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/InquireQStatusCmdCollector.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/InquireQStatusCmdCollector.java new file mode 100644 index 000000000..3580c02b1 --- /dev/null 
+++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/InquireQStatusCmdCollector.java @@ -0,0 +1,74 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.metricscollector; + +import com.ibm.mq.constants.CMQC; +import com.ibm.mq.constants.CMQCFC; +import com.ibm.mq.headers.pcf.PCFMessage; +import java.util.Set; +import java.util.function.Consumer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * The InquireQStatusCmdCollector class is responsible for collecting and publishing queue metrics + * using the IBM MQ command `MQCMD_INQUIRE_Q_STATUS`. It extends the QueueMetricsCollector class and + * implements the Runnable interface, enabling execution within a separate thread. + * + *

This class interacts with PCF (Programmable Command Formats) messages to query queue metrics + * based on the configuration provided. It retrieves status information about a queue, such as: • + * The number of messages on the queue • Open handles (how many apps have it open) • Whether the + * queue is in use for input/output • Last get/put timestamps • And other real-time statistics + * + *

Thread Safety: This class is thread-safe, as it operates independently with state shared only + * through immutable or synchronized structures where necessary. + * + *

Usage: - Instantiate this class by providing an existing QueueMetricsCollector instance, a map + * of metrics to report, and shared state. - Invoke the run method to execute the queue metrics + * collection process. + */ +final class InquireQStatusCmdCollector implements Consumer { + + static final int[] ATTRIBUTES = + new int[] { + CMQC.MQCA_Q_NAME, + CMQCFC.MQIACF_CUR_Q_FILE_SIZE, + CMQCFC.MQIACF_CUR_MAX_FILE_SIZE, + CMQCFC.MQIACF_OLDEST_MSG_AGE, + CMQCFC.MQIACF_UNCOMMITTED_MSGS, + CMQCFC.MQIACF_Q_TIME_INDICATOR, + CMQC.MQIA_CURRENT_Q_DEPTH, + }; + + private static final Logger logger = LoggerFactory.getLogger(InquireQStatusCmdCollector.class); + + private final QueueCollectionBuddy queueBuddy; + + InquireQStatusCmdCollector(QueueCollectionBuddy queueBuddy) { + this.queueBuddy = queueBuddy; + } + + @Override + public void accept(MetricsCollectorContext context) { + logger.info("Collecting metrics for command MQCMD_INQUIRE_Q_STATUS"); + long entryTime = System.currentTimeMillis(); + + Set queueGenericNames = context.getQueueIncludeFilterNames(); + for (String queueGenericName : queueGenericNames) { + // list of all metrics extracted through MQCMD_INQUIRE_Q_STATUS is mentioned here + // https://www.ibm.com/support/knowledgecenter/SSFKSJ_8.0.0/com.ibm.mq.ref.adm.doc/q087880_.htm + PCFMessage request = new PCFMessage(CMQCFC.MQCMD_INQUIRE_Q_STATUS); + request.addParameter(CMQC.MQCA_Q_NAME, queueGenericName); + request.addParameter(CMQCFC.MQIACF_Q_STATUS_ATTRS, ATTRIBUTES); + queueBuddy.processPcfRequestAndPublishQMetrics( + context, request, queueGenericName, ATTRIBUTES); + } + long exitTime = System.currentTimeMillis() - entryTime; + logger.debug( + "Time taken to publish metrics for all queues is {} milliseconds for command MQCMD_INQUIRE_Q_STATUS", + exitTime); + } +} diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/InquireQueueManagerCmdCollector.java 
b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/InquireQueueManagerCmdCollector.java new file mode 100644 index 000000000..5cdf0c52f --- /dev/null +++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/InquireQueueManagerCmdCollector.java @@ -0,0 +1,76 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.metricscollector; + +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.IBM_MQ_QUEUE_MANAGER; + +import com.ibm.mq.constants.CMQC; +import com.ibm.mq.constants.CMQCFC; +import com.ibm.mq.constants.MQConstants; +import com.ibm.mq.headers.pcf.MQCFIL; +import com.ibm.mq.headers.pcf.PCFMessage; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.LongGauge; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.ibm.mq.metrics.Metrics; +import java.util.List; +import java.util.function.Consumer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** This class is responsible for queue metric collection. 
*/ +public final class InquireQueueManagerCmdCollector implements Consumer { + + private static final Logger logger = + LoggerFactory.getLogger(InquireQueueManagerCmdCollector.class); + private final LongGauge statisticsIntervalGauge; + + public InquireQueueManagerCmdCollector(Meter meter) { + this.statisticsIntervalGauge = Metrics.createIbmMqManagerStatisticsInterval(meter); + } + + @Override + public void accept(MetricsCollectorContext context) { + long entryTime = System.currentTimeMillis(); + logger.debug( + "publishMetrics entry time for queuemanager {} is {} milliseconds", + context.getQueueManagerName(), + entryTime); + // CMQCFC.MQCMD_INQUIRE_Q_MGR is 2 + PCFMessage request = new PCFMessage(CMQCFC.MQCMD_INQUIRE_Q_MGR); + // request.addParameter(CMQC.MQCA_Q_MGR_NAME, "*"); + // CMQCFC.MQIACF_Q_MGR_STATUS_ATTRS is 1001 + request.addParameter( + new MQCFIL(MQConstants.MQIACF_Q_MGR_ATTRS, new int[] {MQConstants.MQIACF_ALL})); + try { + // Note that agent.send() method is synchronized + logger.debug( + "sending PCF agent request to query queuemanager {}", context.getQueueManagerName()); + long startTime = System.currentTimeMillis(); + List responses = context.send(request); + long endTime = System.currentTimeMillis() - startTime; + logger.debug( + "PCF agent queuemanager metrics query response for {} received in {} milliseconds", + context.getQueueManagerName(), + endTime); + if (responses.isEmpty()) { + logger.debug("Unexpected error while PCFMessage.send(), response is either null or empty"); + return; + } + if (context.getMetricsConfig().isIbmMqManagerStatisticsIntervalEnabled()) { + int interval = responses.get(0).getIntParameterValue(CMQC.MQIA_STATISTICS_INTERVAL); + statisticsIntervalGauge.set( + interval, Attributes.of(IBM_MQ_QUEUE_MANAGER, context.getQueueManagerName())); + } + } catch (Exception e) { + logger.error("Error collecting QueueManagerCmd metrics", e); + throw new IllegalStateException(e); + } finally { + long exitTime = 
System.currentTimeMillis() - entryTime; + logger.debug("Time taken to publish metrics for queuemanager is {} milliseconds", exitTime); + } + } +} diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/InquireTStatusCmdCollector.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/InquireTStatusCmdCollector.java new file mode 100644 index 000000000..bb3812565 --- /dev/null +++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/InquireTStatusCmdCollector.java @@ -0,0 +1,136 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.metricscollector; + +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.IBM_MQ_QUEUE_MANAGER; +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.MESSAGING_DESTINATION_NAME; + +import com.ibm.mq.constants.CMQC; +import com.ibm.mq.constants.CMQCFC; +import com.ibm.mq.headers.MQDataException; +import com.ibm.mq.headers.pcf.PCFException; +import com.ibm.mq.headers.pcf.PCFMessage; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.LongGauge; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.ibm.mq.metrics.Metrics; +import java.io.IOException; +import java.util.List; +import java.util.Set; +import java.util.function.Consumer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +final class InquireTStatusCmdCollector implements Consumer { + + private static final Logger logger = LoggerFactory.getLogger(InquireTStatusCmdCollector.class); + + private final LongGauge publishCountGauge; + private final LongGauge subscriptionCountGauge; + + public InquireTStatusCmdCollector(Meter meter) { + this.publishCountGauge = Metrics.createIbmMqPublishCount(meter); + this.subscriptionCountGauge = Metrics.createIbmMqSubscriptionCount(meter); + } + + @Override + public void accept(MetricsCollectorContext context) { + logger.info("Collecting 
metrics for command MQCMD_INQUIRE_TOPIC_STATUS"); + long entryTime = System.currentTimeMillis(); + + Set topicGenericNames = context.getTopicIncludeFilterNames(); + // to query the current status of topics, which is essential for monitoring and managing the + // publish/subscribe environment in IBM MQ. + for (String topicGenericName : topicGenericNames) { + // Request: + // https://www.ibm.com/support/knowledgecenter/SSFKSJ_8.0.0/com.ibm.mq.ref.adm.doc/q088140_.htm + // list of all metrics extracted through MQCMD_INQUIRE_TOPIC_STATUS is mentioned here + // https://www.ibm.com/support/knowledgecenter/SSFKSJ_8.0.0/com.ibm.mq.ref.adm.doc/q088150_.htm + PCFMessage request = new PCFMessage(CMQCFC.MQCMD_INQUIRE_TOPIC_STATUS); + request.addParameter(CMQC.MQCA_TOPIC_STRING, topicGenericName); + + try { + processPcfRequestAndPublishQMetrics(context, topicGenericName, request); + } catch (PCFException pcfe) { + logger.error( + "PCFException caught while collecting metric for Queue: {} for command MQCMD_INQUIRE_TOPIC_STATUS", + topicGenericName, + pcfe); + PCFMessage[] msgs = (PCFMessage[]) pcfe.exceptionSource; + for (PCFMessage msg : msgs) { + logger.error(msg.toString()); + } + // Don't throw exception as it will stop queue metric colloection + } catch (Exception mqe) { + logger.error("MQException caught", mqe); + // Dont throw exception as it will stop queuemetric colloection + } + } + long exitTime = System.currentTimeMillis() - entryTime; + logger.debug( + "Time taken to publish metrics for all queues is {} milliseconds for command MQCMD_INQUIRE_TOPIC_STATUS", + exitTime); + } + + private void processPcfRequestAndPublishQMetrics( + MetricsCollectorContext context, String topicGenericName, PCFMessage request) + throws IOException, MQDataException { + logger.debug( + "sending PCF agent request to topic metrics for generic topic {} for command MQCMD_INQUIRE_TOPIC_STATUS", + topicGenericName); + long startTime = System.currentTimeMillis(); + List response = 
context.send(request); + long endTime = System.currentTimeMillis() - startTime; + logger.debug( + "PCF agent topic metrics query response for generic topic {} for command MQCMD_INQUIRE_TOPIC_STATUS received in {} milliseconds", + topicGenericName, + endTime); + if (response.isEmpty()) { + logger.debug( + "Unexpected error while PCFMessage.send() for command MQCMD_INQUIRE_TOPIC_STATUS, response is either null or empty"); + return; + } + + List messages = + MessageFilter.ofKind("topic") + .excluding(context.getTopicExcludeFilters()) + .withResourceExtractor(MessageBuddy::topicName) + .filter(response); + + for (PCFMessage message : messages) { + String topicName = MessageBuddy.topicName(message); + logger.debug( + "Pulling out metrics for topic name {} for command MQCMD_INQUIRE_TOPIC_STATUS", + topicName); + extractMetrics(context, message, topicName); + } + } + + private void extractMetrics( + MetricsCollectorContext context, PCFMessage pcfMessage, String topicString) + throws PCFException { + Attributes attributes = + Attributes.of( + MESSAGING_DESTINATION_NAME, + topicString, + IBM_MQ_QUEUE_MANAGER, + context.getQueueManagerName()); + if (context.getMetricsConfig().isIbmMqPublishCountEnabled()) { + int publisherCount = 0; + if (pcfMessage.getParameter(CMQC.MQIA_PUB_COUNT) != null) { + publisherCount = pcfMessage.getIntParameterValue(CMQC.MQIA_PUB_COUNT); + } + publishCountGauge.set(publisherCount, attributes); + } + if (context.getMetricsConfig().isIbmMqSubscriptionCountEnabled()) { + int subscriberCount = 0; + if (pcfMessage.getParameter(CMQC.MQIA_SUB_COUNT) != null) { + subscriberCount = pcfMessage.getIntParameterValue(CMQC.MQIA_SUB_COUNT); + } + subscriptionCountGauge.set(subscriberCount, attributes); + } + } +} diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/ListenerMetricsCollector.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/ListenerMetricsCollector.java new file mode 100644 index 
000000000..a7bf3d48d --- /dev/null +++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/ListenerMetricsCollector.java @@ -0,0 +1,113 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.metricscollector; + +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.IBM_MQ_LISTENER_NAME; +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.IBM_MQ_QUEUE_MANAGER; + +import com.ibm.mq.constants.CMQCFC; +import com.ibm.mq.headers.pcf.PCFException; +import com.ibm.mq.headers.pcf.PCFMessage; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.LongGauge; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.ibm.mq.metrics.Metrics; +import java.util.Arrays; +import java.util.List; +import java.util.Set; +import java.util.function.Consumer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * ListenerMetricsCollector is a specialized implementation of the MetricsCollector that is + * responsible for collecting and publishing metrics related to IBM MQ Listeners. + * + *

This class interacts with PCFMessageAgent to query metrics for specific listeners, applies + * "include:" and "exclude:" listenerFilters defined in config yaml, and uses MetricWriteHelper to + * publish the collected metrics in the required format. + * + *

Key functionalities include: • query using PCF Command: MQCMD_INQUIRE_LISTENER_STATUS to get + the status of one or more listeners on a queue manager. • retrieve TCP/IP listeners runtime + information such as: - listener is running or stopped - port number and transport type - last + error codes - associated command server + + *

It utilizes WMQMetricOverride to map metrics from the configuration to their IBM MQ constants. + */ +public final class ListenerMetricsCollector implements Consumer { + + private static final Logger logger = LoggerFactory.getLogger(ListenerMetricsCollector.class); + private final LongGauge listenerStatusGauge; + + public ListenerMetricsCollector(Meter meter) { + this.listenerStatusGauge = Metrics.createIbmMqListenerStatus(meter); + } + + @Override + public void accept(MetricsCollectorContext context) { + long entryTime = System.currentTimeMillis(); + + int[] attrs = new int[] {CMQCFC.MQCACH_LISTENER_NAME, CMQCFC.MQIACH_LISTENER_STATUS}; + logger.debug( + "Attributes being sent along PCF agent request to query channel metrics: " + + Arrays.toString(attrs)); + + Set listenerGenericNames = context.getListenerIncludeFilterNames(); + for (String listenerGenericName : listenerGenericNames) { + PCFMessage request = new PCFMessage(CMQCFC.MQCMD_INQUIRE_LISTENER_STATUS); + request.addParameter(CMQCFC.MQCACH_LISTENER_NAME, listenerGenericName); + request.addParameter(CMQCFC.MQIACF_LISTENER_STATUS_ATTRS, attrs); + try { + logger.debug( + "sending PCF agent request to query metrics for generic listener {}", + listenerGenericName); + long startTime = System.currentTimeMillis(); + List response = context.send(request); + long endTime = System.currentTimeMillis() - startTime; + logger.debug( + "PCF agent listener metrics query response for generic listener {} received in {} milliseconds", + listenerGenericName, + endTime); + if (response.isEmpty()) { + logger.debug("Unexpected error while PCFMessage.send(), response is empty"); + return; + } + + List messages = + MessageFilter.ofKind("listener") + .excluding(context.getListenerExcludeFilters()) + .withResourceExtractor(MessageBuddy::listenerName) + .filter(response); + + for (PCFMessage message : messages) { + String listenerName = MessageBuddy.listenerName(message); + logger.debug("Pulling out metrics for listener name {}", 
listenerName); + updateMetrics(message, listenerName, context); + } + } catch (Exception e) { + logger.error( + "Unexpected error while collecting metrics for listener " + listenerGenericName, e); + } + } + long exitTime = System.currentTimeMillis() - entryTime; + logger.debug("Time taken to publish metrics for all listener is {} milliseconds", exitTime); + } + + private void updateMetrics( + PCFMessage message, String listenerName, MetricsCollectorContext context) + throws PCFException { + if (context.getMetricsConfig().isIbmMqListenerStatusEnabled()) { + int status = message.getIntParameterValue(CMQCFC.MQIACH_LISTENER_STATUS); + listenerStatusGauge.set( + status, + Attributes.of( + IBM_MQ_LISTENER_NAME, + listenerName, + IBM_MQ_QUEUE_MANAGER, + context.getQueueManagerName())); + } + } +} diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/MessageBuddy.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/MessageBuddy.java new file mode 100644 index 000000000..688f9541d --- /dev/null +++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/MessageBuddy.java @@ -0,0 +1,75 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.metricscollector; + +import com.ibm.mq.constants.CMQC; +import com.ibm.mq.constants.CMQCFC; +import com.ibm.mq.constants.CMQXC; +import com.ibm.mq.headers.pcf.PCFException; +import com.ibm.mq.headers.pcf.PCFMessage; +import java.time.Instant; + +public final class MessageBuddy { + + private MessageBuddy() {} + + static String channelName(PCFMessage message) throws PCFException { + return message.getStringParameterValue(CMQCFC.MQCACH_CHANNEL_NAME).trim(); + } + + static String channelType(PCFMessage message) throws PCFException { + switch (message.getIntParameterValue(CMQCFC.MQIACH_CHANNEL_TYPE)) { + case CMQXC.MQCHT_SENDER: + return "sender"; + case CMQXC.MQCHT_SERVER: + return "server"; + case 
CMQXC.MQCHT_RECEIVER: + return "receiver"; + case CMQXC.MQCHT_REQUESTER: + return "requester"; + case CMQXC.MQCHT_SVRCONN: + return "server-connection"; + case CMQXC.MQCHT_CLNTCONN: + return "client-connection"; + case CMQXC.MQCHT_CLUSRCVR: + return "cluster-receiver"; + case CMQXC.MQCHT_CLUSSDR: + return "cluster-sender"; + case CMQXC.MQCHT_MQTT: + return "mqtt"; + case CMQXC.MQCHT_AMQP: + return "amqp"; + default: + throw new IllegalArgumentException( + "Unsupported channel type: " + + message.getIntParameterValue(CMQCFC.MQIACH_CHANNEL_TYPE)); + } + } + + static String topicName(PCFMessage message) throws PCFException { + return message.getStringParameterValue(CMQC.MQCA_TOPIC_STRING).trim(); + } + + public static String listenerName(PCFMessage message) throws PCFException { + return message.getStringParameterValue(CMQCFC.MQCACH_LISTENER_NAME).trim(); + } + + public static String queueName(PCFMessage message) throws PCFException { + return message.getStringParameterValue(CMQC.MQCA_Q_NAME).trim(); + } + + public static long channelStartTime(PCFMessage message) throws PCFException { + String date = message.getStringParameterValue(CMQCFC.MQCACH_CHANNEL_START_DATE).trim(); + String time = message.getStringParameterValue(CMQCFC.MQCACH_CHANNEL_START_TIME).trim(); + + Instant parsed = Instant.parse(date + "T" + time.replaceAll("\\.", ":") + "Z"); + return parsed.getEpochSecond(); + } + + public static String jobName(PCFMessage message) throws PCFException { + return message.getStringParameterValue(CMQCFC.MQCACH_MCA_JOB_NAME).trim(); + } +} diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/MessageFilter.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/MessageFilter.java new file mode 100644 index 000000000..531c217b0 --- /dev/null +++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/MessageFilter.java @@ -0,0 +1,77 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: 
Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.metricscollector; + +import com.google.errorprone.annotations.CanIgnoreReturnValue; +import com.ibm.mq.headers.pcf.PCFException; +import com.ibm.mq.headers.pcf.PCFMessage; +import io.opentelemetry.ibm.mq.config.ExcludeFilters; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** Helps to consolidate repeated exclude/filtering logic. */ +class MessageFilter { + + private static final Logger logger = LoggerFactory.getLogger(MessageFilter.class); + + private final String kind; + private final Collection filters; + private final ResourceExtractor extractor; + + private MessageFilter( + String kind, Collection filters, ResourceExtractor extractor) { + this.kind = kind; + this.filters = filters; + this.extractor = extractor; + } + + static MessageFilterBuilder ofKind(String kind) { + return new MessageFilterBuilder(kind); + } + + public List filter(List messages) throws PCFException { + List result = new ArrayList<>(); + for (PCFMessage message : messages) { + String resourceName = extractor.apply(message); + if (ExcludeFilters.isExcluded(resourceName, filters)) { + logger.debug("{} name = {} is excluded.", kind, resourceName); + } else { + result.add(message); + } + } + return result; + } + + static class MessageFilterBuilder { + + private final String kind; + private Set filters = new HashSet<>(); + + public MessageFilterBuilder(String kind) { + this.kind = kind; + } + + @CanIgnoreReturnValue + public MessageFilterBuilder excluding(Set filters) { + this.filters = new HashSet<>(filters); + return this; + } + + public MessageFilter withResourceExtractor(ResourceExtractor extractor) { + return new MessageFilter(kind, filters, extractor); + } + } + + interface ResourceExtractor { + // Ugh, exceptions everywhere, huh? 
+ String apply(PCFMessage message) throws PCFException; + } +} diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/MetricsCollectorContext.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/MetricsCollectorContext.java new file mode 100644 index 000000000..dab85748e --- /dev/null +++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/MetricsCollectorContext.java @@ -0,0 +1,101 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.metricscollector; + +import static java.util.Collections.emptyList; + +import com.ibm.mq.MQQueueManager; +import com.ibm.mq.headers.MQDataException; +import com.ibm.mq.headers.pcf.PCFMessage; +import com.ibm.mq.headers.pcf.PCFMessageAgent; +import io.opentelemetry.ibm.mq.config.ExcludeFilters; +import io.opentelemetry.ibm.mq.config.QueueManager; +import io.opentelemetry.ibm.mq.metrics.MetricsConfig; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Set; +import javax.annotation.concurrent.Immutable; +import org.jetbrains.annotations.NotNull; + +/** + * A temporary bundle to contain the collaborators of the original MetricsCollector base class until + * we can finish unwinding things. When done and there are no longer usages of MetricsCollector, we + * could consider renaming this. 
+ */ +@Immutable +public final class MetricsCollectorContext { + + private final QueueManager queueManager; + private final PCFMessageAgent agent; + private final MQQueueManager mqQueueManager; + private final MetricsConfig metricsConfig; + + public MetricsCollectorContext( + QueueManager queueManager, + PCFMessageAgent agent, + MQQueueManager mqQueueManager, + MetricsConfig metricsConfig) { + this.queueManager = queueManager; + this.agent = agent; + this.mqQueueManager = mqQueueManager; + this.metricsConfig = metricsConfig; + } + + Set getChannelIncludeFilterNames() { + return queueManager.getChannelFilters().getInclude(); + } + + Set getChannelExcludeFilters() { + return queueManager.getChannelFilters().getExclude(); + } + + Set getListenerIncludeFilterNames() { + return queueManager.getListenerFilters().getInclude(); + } + + Set getListenerExcludeFilters() { + return queueManager.getListenerFilters().getExclude(); + } + + Set getTopicIncludeFilterNames() { + return queueManager.getTopicFilters().getInclude(); + } + + Set getTopicExcludeFilters() { + return queueManager.getTopicFilters().getExclude(); + } + + Set getQueueIncludeFilterNames() { + return queueManager.getQueueFilters().getInclude(); + } + + Set getQueueExcludeFilters() { + return queueManager.getQueueFilters().getExclude(); + } + + @NotNull + List send(PCFMessage request) throws IOException, MQDataException { + PCFMessage[] result = agent.send(request); + return result == null ? 
emptyList() : Arrays.asList(result); + } + + String getQueueManagerName() { + return queueManager.getName(); + } + + QueueManager getQueueManager() { + return queueManager; + } + + public MQQueueManager getMqQueueManager() { + return mqQueueManager; + } + + public MetricsConfig getMetricsConfig() { + return metricsConfig; + } +} diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/PerformanceEventQueueCollector.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/PerformanceEventQueueCollector.java new file mode 100644 index 000000000..db1d4a254 --- /dev/null +++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/PerformanceEventQueueCollector.java @@ -0,0 +1,131 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.metricscollector; + +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.IBM_MQ_QUEUE_MANAGER; +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.MESSAGING_DESTINATION_NAME; + +import com.ibm.mq.MQException; +import com.ibm.mq.MQGetMessageOptions; +import com.ibm.mq.MQMessage; +import com.ibm.mq.MQQueue; +import com.ibm.mq.constants.CMQC; +import com.ibm.mq.constants.MQConstants; +import com.ibm.mq.headers.pcf.PCFException; +import com.ibm.mq.headers.pcf.PCFMessage; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.LongCounter; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.ibm.mq.metrics.Metrics; +import java.io.IOException; +import java.util.function.Consumer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +// Captures metrics from events logged to the queue manager performance event queue. 
+public final class PerformanceEventQueueCollector implements Consumer { + + private static final Logger logger = + LoggerFactory.getLogger(PerformanceEventQueueCollector.class); + private final LongCounter fullQueueDepthCounter; + private final LongCounter highQueueDepthCounter; + private final LongCounter lowQueueDepthCounter; + + public PerformanceEventQueueCollector(Meter meter) { + this.fullQueueDepthCounter = Metrics.createIbmMqQueueDepthFullEvent(meter); + this.highQueueDepthCounter = Metrics.createIbmMqQueueDepthHighEvent(meter); + this.lowQueueDepthCounter = Metrics.createIbmMqQueueDepthLowEvent(meter); + } + + private void readEvents(MetricsCollectorContext context, String performanceEventsQueueName) + throws Exception { + + MQQueue queue = null; + int counter = 0; + try { + int queueAccessOptions = MQConstants.MQOO_FAIL_IF_QUIESCING | MQConstants.MQOO_INPUT_SHARED; + queue = + context.getMqQueueManager().accessQueue(performanceEventsQueueName, queueAccessOptions); + // keep going until receiving the exception MQConstants.MQRC_NO_MSG_AVAILABLE + logger.debug("Start reading events from performance queue {}", performanceEventsQueueName); + while (true) { + try { + MQGetMessageOptions getOptions = new MQGetMessageOptions(); + getOptions.options = MQConstants.MQGMO_NO_WAIT | MQConstants.MQGMO_FAIL_IF_QUIESCING; + MQMessage message = new MQMessage(); + + queue.get(message, getOptions); + PCFMessage receivedMsg = new PCFMessage(message); + incrementCounterByEventType(context, receivedMsg); + counter++; + } catch (MQException e) { + if (e.reasonCode != MQConstants.MQRC_NO_MSG_AVAILABLE) { + logger.error(e.getMessage(), e); + } + break; + } catch (IOException e) { + logger.error(e.getMessage(), e); + break; + } + } + } finally { + if (queue != null) { + queue.close(); + } + } + logger.debug("Read {} events from performance queue {}", counter, performanceEventsQueueName); + } + + private void incrementCounterByEventType(MetricsCollectorContext context, PCFMessage 
receivedMsg) + throws PCFException { + String queueName = receivedMsg.getStringParameterValue(CMQC.MQCA_BASE_OBJECT_NAME).trim(); + Attributes attributes = + Attributes.of( + IBM_MQ_QUEUE_MANAGER, + context.getQueueManagerName(), + MESSAGING_DESTINATION_NAME, + queueName); + switch (receivedMsg.getReason()) { + case CMQC.MQRC_Q_FULL: + if (context.getMetricsConfig().isIbmMqQueueDepthFullEventEnabled()) { + fullQueueDepthCounter.add(1, attributes); + } + break; + case CMQC.MQRC_Q_DEPTH_HIGH: + if (context.getMetricsConfig().isIbmMqQueueDepthHighEventEnabled()) { + highQueueDepthCounter.add(1, attributes); + } + break; + case CMQC.MQRC_Q_DEPTH_LOW: + if (context.getMetricsConfig().isIbmMqQueueDepthLowEventEnabled()) { + lowQueueDepthCounter.add(1, attributes); + } + break; + default: + logger.debug("Unknown event reason {}", receivedMsg.getReason()); + } + } + + @Override + public void accept(MetricsCollectorContext context) { + long entryTime = System.currentTimeMillis(); + String performanceEventsQueueName = context.getQueueManager().getPerformanceEventsQueueName(); + logger.info( + "sending PCF agent request to read performance events from queue {}", + performanceEventsQueueName); + try { + readEvents(context, performanceEventsQueueName); + } catch (Exception e) { + logger.error( + "Unexpected error occurred while collecting performance events for queue " + + performanceEventsQueueName, + e); + } + long exitTime = System.currentTimeMillis() - entryTime; + logger.debug( + "Time taken to publish metrics for performance events is {} milliseconds", exitTime); + } +} diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/QueueCollectionBuddy.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/QueueCollectionBuddy.java new file mode 100644 index 000000000..50187adeb --- /dev/null +++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/QueueCollectionBuddy.java @@ -0,0 +1,311 @@ +/* + * Copyright 
The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.metricscollector; + +import static com.ibm.mq.constants.CMQC.MQQT_ALIAS; +import static com.ibm.mq.constants.CMQC.MQQT_CLUSTER; +import static com.ibm.mq.constants.CMQC.MQQT_LOCAL; +import static com.ibm.mq.constants.CMQC.MQQT_MODEL; +import static com.ibm.mq.constants.CMQC.MQQT_REMOTE; +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.IBM_MQ_QUEUE_MANAGER; +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.IBM_MQ_QUEUE_TYPE; +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.MESSAGING_DESTINATION_NAME; +import static io.opentelemetry.ibm.mq.metrics.Metrics.MIBY_TO_BYTES; + +import com.ibm.mq.constants.CMQC; +import com.ibm.mq.constants.CMQCFC; +import com.ibm.mq.headers.MQDataException; +import com.ibm.mq.headers.pcf.MQCFIL; +import com.ibm.mq.headers.pcf.MQCFIN; +import com.ibm.mq.headers.pcf.PCFException; +import com.ibm.mq.headers.pcf.PCFMessage; +import com.ibm.mq.headers.pcf.PCFParameter; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.LongGauge; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.ibm.mq.metrics.Metrics; +import io.opentelemetry.ibm.mq.metrics.MetricsConfig; +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import javax.annotation.Nullable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A collaborator buddy of the queue collectors that helps them to send a message, process the + * response, and generate metrics. 
+ */ +final class QueueCollectionBuddy { + private static final Logger logger = LoggerFactory.getLogger(QueueCollectionBuddy.class); + private final Map gauges = new HashMap<>(); + + private final QueueCollectorSharedState sharedState; + private final LongGauge onqtimeShort; + private final LongGauge onqtimeLong; + + @FunctionalInterface + private interface AllowedGauge { + void set(MetricsCollectorContext context, Integer value, Attributes attributes); + } + + private static AllowedGauge createAllowedGauge( + LongGauge gauge, Function allowed) { + return createAllowedGauge(gauge, allowed, Integer::longValue /*identity*/); + } + + private static AllowedGauge createAllowedGauge( + LongGauge gauge, + Function allowed, + Function unitMangler) { + return (context, val, attributes) -> { + if (allowed.apply(context.getMetricsConfig())) { + gauge.set(unitMangler.apply(val), attributes); + } + }; + } + + QueueCollectionBuddy(Meter meter, QueueCollectorSharedState sharedState) { + this.sharedState = sharedState; + gauges.put( + CMQC.MQIA_CURRENT_Q_DEPTH, + createAllowedGauge( + Metrics.createIbmMqQueueDepth(meter), MetricsConfig::isIbmMqQueueDepthEnabled)); + gauges.put( + CMQC.MQIA_MAX_Q_DEPTH, + createAllowedGauge( + Metrics.createIbmMqMaxQueueDepth(meter), MetricsConfig::isIbmMqMaxQueueDepthEnabled)); + gauges.put( + CMQC.MQIA_OPEN_INPUT_COUNT, + createAllowedGauge( + Metrics.createIbmMqOpenInputCount(meter), MetricsConfig::isIbmMqOpenInputCountEnabled)); + gauges.put( + CMQC.MQIA_OPEN_OUTPUT_COUNT, + createAllowedGauge( + Metrics.createIbmMqOpenOutputCount(meter), + MetricsConfig::isIbmMqOpenOutputCountEnabled)); + gauges.put( + CMQC.MQIA_Q_SERVICE_INTERVAL, + createAllowedGauge( + Metrics.createIbmMqServiceInterval(meter), + MetricsConfig::isIbmMqServiceIntervalEnabled)); + gauges.put( + CMQC.MQIA_Q_SERVICE_INTERVAL_EVENT, + createAllowedGauge( + Metrics.createIbmMqServiceIntervalEvent(meter), + MetricsConfig::isIbmMqServiceIntervalEventEnabled)); + gauges.put( + 
CMQCFC.MQIACF_OLDEST_MSG_AGE, + createAllowedGauge( + Metrics.createIbmMqOldestMsgAge(meter), MetricsConfig::isIbmMqOldestMsgAgeEnabled)); + gauges.put( + CMQCFC.MQIACF_UNCOMMITTED_MSGS, + createAllowedGauge( + Metrics.createIbmMqUncommittedMessages(meter), + MetricsConfig::isIbmMqUncommittedMessagesEnabled)); + gauges.put( + CMQC.MQIA_MSG_DEQ_COUNT, + createAllowedGauge( + Metrics.createIbmMqMessageDeqCount(meter), + MetricsConfig::isIbmMqMessageDeqCountEnabled)); + gauges.put( + CMQC.MQIA_MSG_ENQ_COUNT, + createAllowedGauge( + Metrics.createIbmMqMessageEnqCount(meter), + MetricsConfig::isIbmMqMessageEnqCountEnabled)); + gauges.put( + CMQC.MQIA_HIGH_Q_DEPTH, + createAllowedGauge( + Metrics.createIbmMqHighQueueDepth(meter), MetricsConfig::isIbmMqHighQueueDepthEnabled)); + gauges.put( + CMQCFC.MQIACF_CUR_Q_FILE_SIZE, + createAllowedGauge( + Metrics.createIbmMqCurrentQueueFilesize(meter), + MetricsConfig::isIbmMqCurrentQueueFilesizeEnabled, + MIBY_TO_BYTES)); + gauges.put( + CMQCFC.MQIACF_CUR_MAX_FILE_SIZE, + createAllowedGauge( + Metrics.createIbmMqCurrentMaxQueueFilesize(meter), + MetricsConfig::isIbmMqCurrentMaxQueueFilesizeEnabled, + MIBY_TO_BYTES)); + + this.onqtimeShort = Metrics.createIbmMqOnqtimeShortPeriod(meter); + this.onqtimeLong = Metrics.createIbmMqOnqtimeLongPeriod(meter); + } + + /** + * Sends a PCFMessage request, reads the response, and generates metrics from the response. It + * handles all exceptions. 
+ */ + void processPcfRequestAndPublishQMetrics( + MetricsCollectorContext context, PCFMessage request, String queueGenericName, int[] fields) { + try { + doProcessPcfRequestAndPublishQMetrics(context, request, queueGenericName, fields); + } catch (PCFException pcfe) { + logger.error( + "PCFException caught while collecting metric for Queue: {}", queueGenericName, pcfe); + if (pcfe.exceptionSource instanceof PCFMessage[]) { + PCFMessage[] msgs = (PCFMessage[]) pcfe.exceptionSource; + for (PCFMessage msg : msgs) { + logger.error(msg.toString()); + } + } + if (pcfe.exceptionSource instanceof PCFMessage) { + PCFMessage msg = (PCFMessage) pcfe.exceptionSource; + logger.error(msg.toString()); + } + // Don't throw exception as it will stop queue metric collection + } catch (Exception mqe) { + logger.error("MQException caught", mqe); + // Don't throw exception as it will stop queue metric collection + } + } + + private void doProcessPcfRequestAndPublishQMetrics( + MetricsCollectorContext context, PCFMessage request, String queueGenericName, int[] fields) + throws IOException, MQDataException { + logger.debug( + "sending PCF agent request to query metrics for generic queue {}", queueGenericName); + long startTime = System.currentTimeMillis(); + List response = context.send(request); + long endTime = System.currentTimeMillis() - startTime; + logger.debug( + "PCF agent queue metrics query response for generic queue {} received in {} milliseconds", + queueGenericName, + endTime); + if (response.isEmpty()) { + logger.debug("Unexpected error while PCFMessage.send(), response is empty"); + return; + } + + List messages = + MessageFilter.ofKind("queue") + .excluding(context.getQueueExcludeFilters()) + .withResourceExtractor(MessageBuddy::queueName) + .filter(response); + + for (PCFMessage message : messages) { + handleMessage(context, message, fields); + } + } + + private void handleMessage(MetricsCollectorContext context, PCFMessage message, int[] fields) + throws PCFException { 
+ String queueName = MessageBuddy.queueName(message); + String queueType = getQueueTypeFromName(message, queueName); + if (queueType == null) { + logger.info("Unable to determine queue type for queue name = {}", queueName); + return; + } + + logger.debug("Pulling out metrics for queue name {}", queueName); + getMetrics(context, message, queueName, queueType, fields); + } + + @Nullable + private String getQueueTypeFromName(PCFMessage message, String queueName) throws PCFException { + if (message.getParameterValue(CMQC.MQIA_Q_TYPE) == null) { + return sharedState.getType(queueName); + } + + String queueType = getQueueType(message); + sharedState.putQueueType(queueName, queueType); + return queueType; + } + + private static String getQueueType(PCFMessage message) throws PCFException { + String baseQueueType = getBaseQueueType(message); + return maybeAppendUsage(message, baseQueueType); + } + + private static String maybeAppendUsage(PCFMessage message, String baseQueueType) + throws PCFException { + if (message.getParameter(CMQC.MQIA_USAGE) == null) { + return baseQueueType; + } + switch (message.getIntParameterValue(CMQC.MQIA_USAGE)) { + case CMQC.MQUS_NORMAL: + return baseQueueType + "-normal"; + case CMQC.MQUS_TRANSMISSION: + return baseQueueType + "-transmission"; + default: + return baseQueueType; + } + } + + private static String getBaseQueueType(PCFMessage message) throws PCFException { + switch (message.getIntParameterValue(CMQC.MQIA_Q_TYPE)) { + case MQQT_LOCAL: + return "local"; + case MQQT_ALIAS: + return "alias"; + case MQQT_REMOTE: + return "remote"; + case MQQT_CLUSTER: + return "cluster"; + case MQQT_MODEL: + return "model"; + default: + logger.warn("Unknown type of queue {}", message.getIntParameterValue(CMQC.MQIA_Q_TYPE)); + return "unknown"; + } + } + + private void getMetrics( + MetricsCollectorContext context, + PCFMessage pcfMessage, + String queueName, + String queueType, + int[] fields) + throws PCFException { + + for (int field : fields) { + if 
(field == CMQC.MQCA_Q_NAME || field == CMQC.MQIA_USAGE || field == CMQC.MQIA_Q_TYPE) { + continue; + } + updateMetrics(context, pcfMessage, queueName, queueType, field); + } + } + + private void updateMetrics( + MetricsCollectorContext context, + PCFMessage pcfMessage, + String queueName, + String queueType, + int constantValue) + throws PCFException { + PCFParameter pcfParam = pcfMessage.getParameter(constantValue); + Attributes attributes = + Attributes.of( + MESSAGING_DESTINATION_NAME, + queueName, + IBM_MQ_QUEUE_TYPE, + queueType, + IBM_MQ_QUEUE_MANAGER, + context.getQueueManagerName()); + + if (pcfParam instanceof MQCFIN) { + AllowedGauge g = this.gauges.get(constantValue); + if (g == null) { + throw new IllegalArgumentException("Unknown constantValue " + constantValue); + } + int metricVal = pcfMessage.getIntParameterValue(constantValue); + g.set(context, metricVal, attributes); + } + if (pcfParam instanceof MQCFIL) { + int[] metricVals = pcfMessage.getIntListParameterValue(constantValue); + if (context.getMetricsConfig().isIbmMqOnqtimeShortPeriodEnabled()) { + onqtimeShort.set(metricVals[0], attributes); + } + if (context.getMetricsConfig().isIbmMqOnqtimeLongPeriodEnabled()) { + onqtimeLong.set(metricVals[1], attributes); + } + } + } +} diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/QueueCollectorSharedState.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/QueueCollectorSharedState.java new file mode 100644 index 000000000..94fcc7788 --- /dev/null +++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/QueueCollectorSharedState.java @@ -0,0 +1,25 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.metricscollector; + +import java.util.concurrent.ConcurrentHashMap; +import javax.annotation.Nullable; + +final class QueueCollectorSharedState { + + private final ConcurrentHashMap queueNameToType = new 
ConcurrentHashMap<>(); + + QueueCollectorSharedState() {} + + public void putQueueType(String name, String value) { + queueNameToType.put(name, value); + } + + @Nullable + public String getType(String name) { + return queueNameToType.get(name); + } +} diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/QueueManagerEventCollector.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/QueueManagerEventCollector.java new file mode 100644 index 000000000..723e433cf --- /dev/null +++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/QueueManagerEventCollector.java @@ -0,0 +1,111 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.metricscollector; + +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.IBM_MQ_QUEUE_MANAGER; +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.SERVICE_NAME; +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.USER_NAME; + +import com.ibm.mq.MQException; +import com.ibm.mq.MQGetMessageOptions; +import com.ibm.mq.MQMessage; +import com.ibm.mq.MQQueue; +import com.ibm.mq.constants.CMQC; +import com.ibm.mq.constants.CMQCFC; +import com.ibm.mq.constants.MQConstants; +import com.ibm.mq.headers.pcf.PCFMessage; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.LongCounter; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.ibm.mq.metrics.Metrics; +import java.io.IOException; +import java.util.function.Consumer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +// Reads queue manager events and counts them as metrics +public final class QueueManagerEventCollector implements Consumer { + + private static final Logger logger = LoggerFactory.getLogger(QueueManagerEventCollector.class); + private final LongCounter authorityEventCounter; + + public QueueManagerEventCollector(Meter meter) { + 
this.authorityEventCounter = Metrics.createIbmMqUnauthorizedEvent(meter); + } + + private void readEvents(MetricsCollectorContext context, String queueManagerEventsQueueName) + throws Exception { + + MQQueue queue = null; + try { + int queueAccessOptions = MQConstants.MQOO_FAIL_IF_QUIESCING | MQConstants.MQOO_INPUT_SHARED; + queue = + context.getMqQueueManager().accessQueue(queueManagerEventsQueueName, queueAccessOptions); + // keep going until receiving the exception MQConstants.MQRC_NO_MSG_AVAILABLE + while (true) { + try { + MQGetMessageOptions getOptions = new MQGetMessageOptions(); + getOptions.options = MQConstants.MQGMO_NO_WAIT | MQConstants.MQGMO_FAIL_IF_QUIESCING; + MQMessage message = new MQMessage(); + + queue.get(message, getOptions); + PCFMessage received = new PCFMessage(message); + if (received.getReason() == CMQC.MQRC_NOT_AUTHORIZED) { + + if (context.getMetricsConfig().isIbmMqUnauthorizedEventEnabled()) { + String username = received.getStringParameterValue(CMQCFC.MQCACF_USER_IDENTIFIER); + String applicationName = received.getStringParameterValue(CMQCFC.MQCACF_APPL_NAME); + authorityEventCounter.add( + 1, + Attributes.of( + IBM_MQ_QUEUE_MANAGER, + context.getQueueManagerName(), + USER_NAME, + username, + SERVICE_NAME, + applicationName)); + } + } else { + logger.debug("Unknown event reason {}", received.getReason()); + } + + } catch (MQException e) { + if (e.reasonCode != MQConstants.MQRC_NO_MSG_AVAILABLE) { + logger.error(e.getMessage(), e); + } + break; + } catch (IOException e) { + logger.error(e.getMessage(), e); + break; + } + } + } finally { + if (queue != null) { + queue.close(); + } + } + } + + @Override + public void accept(MetricsCollectorContext context) { + long entryTime = System.currentTimeMillis(); + String queueManagerEventsQueueName = context.getQueueManager().getQueueManagerEventsQueueName(); + logger.info( + "sending PCF agent request to read queue manager events from queue {}", + queueManagerEventsQueueName); + try { + 
readEvents(context, queueManagerEventsQueueName); + } catch (Exception e) { + logger.error( + "Unexpected error occurred while collecting queue manager events for queue " + + queueManagerEventsQueueName, + e); + } + long exitTime = System.currentTimeMillis() - entryTime; + logger.debug( + "Time taken to publish metrics for queue manager events is {} milliseconds", exitTime); + } +} diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/QueueManagerMetricsCollector.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/QueueManagerMetricsCollector.java new file mode 100644 index 000000000..2b3d57086 --- /dev/null +++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/QueueManagerMetricsCollector.java @@ -0,0 +1,102 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.metricscollector; + +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.IBM_MQ_QUEUE_MANAGER; +import static io.opentelemetry.ibm.mq.metrics.Metrics.MIBY_TO_BYTES; + +import com.ibm.mq.constants.CMQCFC; +import com.ibm.mq.headers.pcf.PCFMessage; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.LongGauge; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.ibm.mq.metrics.Metrics; +import java.util.List; +import java.util.function.Consumer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** This class is responsible for queue manager metric collection. 
*/ +public final class QueueManagerMetricsCollector implements Consumer { + + private static final Logger logger = LoggerFactory.getLogger(QueueManagerMetricsCollector.class); + + private final LongGauge statusGauge; + private final LongGauge connectionCountGauge; + private final LongGauge restartLogSizeGauge; + private final LongGauge reuseLogSizeGauge; + private final LongGauge archiveLogSizeGauge; + private final LongGauge maxActiveChannelsGauge; + + public QueueManagerMetricsCollector(Meter meter) { + this.statusGauge = Metrics.createIbmMqManagerStatus(meter); + this.connectionCountGauge = Metrics.createIbmMqConnectionCount(meter); + this.restartLogSizeGauge = Metrics.createIbmMqRestartLogSize(meter); + this.reuseLogSizeGauge = Metrics.createIbmMqReusableLogSize(meter); + this.archiveLogSizeGauge = Metrics.createIbmMqArchiveLogSize(meter); + this.maxActiveChannelsGauge = Metrics.createIbmMqManagerMaxActiveChannels(meter); + } + + @Override + public void accept(MetricsCollectorContext context) { + long entryTime = System.currentTimeMillis(); + logger.debug( + "publishMetrics entry time for queuemanager {} is {} milliseconds", + context.getQueueManagerName(), + entryTime); + // CMQCFC.MQCMD_INQUIRE_Q_MGR_STATUS is 161 + PCFMessage request = new PCFMessage(CMQCFC.MQCMD_INQUIRE_Q_MGR_STATUS); + // CMQCFC.MQIACF_Q_MGR_STATUS_ATTRS is 1229 + request.addParameter(CMQCFC.MQIACF_Q_MGR_STATUS_ATTRS, new int[] {CMQCFC.MQIACF_ALL}); + try { + // Note that agent.send() method is synchronized + logger.debug( + "sending PCF agent request to query queuemanager {}", context.getQueueManagerName()); + long startTime = System.currentTimeMillis(); + List responses = context.send(request); + long endTime = System.currentTimeMillis() - startTime; + logger.debug( + "PCF agent queuemanager metrics query response for {} received in {} milliseconds", + context.getQueueManagerName(), + endTime); + if (responses.isEmpty()) { + logger.debug("Unexpected error while PCFMessage.send(), 
response is empty"); + return; + } + Attributes attributes = Attributes.of(IBM_MQ_QUEUE_MANAGER, context.getQueueManagerName()); + if (context.getMetricsConfig().isIbmMqManagerStatusEnabled()) { + int status = responses.get(0).getIntParameterValue(CMQCFC.MQIACF_Q_MGR_STATUS); + statusGauge.set(status, attributes); + } + if (context.getMetricsConfig().isIbmMqConnectionCountEnabled()) { + int count = responses.get(0).getIntParameterValue(CMQCFC.MQIACF_CONNECTION_COUNT); + connectionCountGauge.set(count, attributes); + } + if (context.getMetricsConfig().isIbmMqRestartLogSizeEnabled()) { + int logSize = responses.get(0).getIntParameterValue(CMQCFC.MQIACF_RESTART_LOG_SIZE); + restartLogSizeGauge.set(MIBY_TO_BYTES.apply(logSize), attributes); + } + if (context.getMetricsConfig().isIbmMqReusableLogSizeEnabled()) { + int logSize = responses.get(0).getIntParameterValue(CMQCFC.MQIACF_REUSABLE_LOG_SIZE); + reuseLogSizeGauge.set(MIBY_TO_BYTES.apply(logSize), attributes); + } + if (context.getMetricsConfig().isIbmMqArchiveLogSizeEnabled()) { + int logSize = responses.get(0).getIntParameterValue(CMQCFC.MQIACF_ARCHIVE_LOG_SIZE); + archiveLogSizeGauge.set(MIBY_TO_BYTES.apply(logSize), attributes); + } + if (context.getMetricsConfig().isIbmMqManagerMaxActiveChannelsEnabled()) { + int maxActiveChannels = context.getQueueManager().getMaxActiveChannels(); + maxActiveChannelsGauge.set(maxActiveChannels, attributes); + } + } catch (Exception e) { + logger.error(e.getMessage()); + throw new IllegalStateException(e); + } finally { + long exitTime = System.currentTimeMillis() - entryTime; + logger.debug("Time taken to publish metrics for queuemanager is {} milliseconds", exitTime); + } + } +} diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/QueueMetricsCollector.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/QueueMetricsCollector.java new file mode 100644 index 000000000..35370fb73 --- /dev/null +++ 
b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/QueueMetricsCollector.java @@ -0,0 +1,62 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.metricscollector; + +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.ibm.mq.opentelemetry.ConfigWrapper; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public final class QueueMetricsCollector implements Consumer { + + private static final Logger logger = LoggerFactory.getLogger(QueueMetricsCollector.class); + + private final List> publishers = new ArrayList<>(); + private final InquireQCmdCollector inquireQueueCmd; + private final ExecutorService threadPool; + private final ConfigWrapper config; + + public QueueMetricsCollector(Meter meter, ExecutorService threadPool, ConfigWrapper config) { + this.threadPool = threadPool; + this.config = config; + QueueCollectionBuddy queueBuddy = + new QueueCollectionBuddy(meter, new QueueCollectorSharedState()); + this.inquireQueueCmd = new InquireQCmdCollector(queueBuddy); + publishers.add(new InquireQStatusCmdCollector(queueBuddy)); + publishers.add(new ResetQStatsCmdCollector(queueBuddy)); + } + + @Override + public void accept(MetricsCollectorContext context) { + logger.info("Collecting queue metrics..."); + + // first collect all queue types. + inquireQueueCmd.accept(context); + + // schedule all other jobs in parallel. 
+ List> taskJobs = new ArrayList<>(); + for (Consumer p : publishers) { + taskJobs.add( + () -> { + p.accept(context); + return null; + }); + } + + try { + int timeout = this.config.getInt("queueMetricsCollectionTimeoutInSeconds", 20); + threadPool.invokeAll(taskJobs, timeout, TimeUnit.SECONDS); + } catch (InterruptedException e) { + logger.error("The thread was interrupted ", e); + } + } +} diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/ReadConfigurationEventQueueCollector.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/ReadConfigurationEventQueueCollector.java new file mode 100644 index 000000000..f7e56134d --- /dev/null +++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/ReadConfigurationEventQueueCollector.java @@ -0,0 +1,142 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.metricscollector; + +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.IBM_MQ_QUEUE_MANAGER; + +import com.ibm.mq.MQException; +import com.ibm.mq.MQGetMessageOptions; +import com.ibm.mq.MQMessage; +import com.ibm.mq.MQQueue; +import com.ibm.mq.constants.CMQC; +import com.ibm.mq.constants.CMQCFC; +import com.ibm.mq.constants.MQConstants; +import com.ibm.mq.headers.pcf.PCFMessage; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.LongGauge; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.ibm.mq.metrics.Metrics; +import java.io.IOException; +import java.util.function.Consumer; +import javax.annotation.Nullable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public final class ReadConfigurationEventQueueCollector + implements Consumer { + + private static final Logger logger = + LoggerFactory.getLogger(ReadConfigurationEventQueueCollector.class); + private final long bootTime; + private final LongGauge maxHandlesGauge; + + public 
ReadConfigurationEventQueueCollector(Meter meter) { + this.bootTime = System.currentTimeMillis(); + this.maxHandlesGauge = Metrics.createIbmMqManagerMaxHandles(meter); + } + + @Nullable + private PCFMessage findLastUpdate( + MetricsCollectorContext context, long entryTime, String configurationQueueName) + throws Exception { + // find the last update: + PCFMessage candidate = null; + + boolean consumeEvents = + context.getQueueManager().getConsumeConfigurationEventInterval() > 0 + && (entryTime - this.bootTime) + % context.getQueueManager().getConsumeConfigurationEventInterval() + == 0; + + MQQueue queue = null; + try { + int queueAccessOptions = MQConstants.MQOO_FAIL_IF_QUIESCING | MQConstants.MQOO_INPUT_SHARED; + if (!consumeEvents) { + // we are not consuming the events. + queueAccessOptions |= MQConstants.MQOO_BROWSE; + } + queue = context.getMqQueueManager().accessQueue(configurationQueueName, queueAccessOptions); + int maxSequenceNumber = 0; + // keep going until receiving the exception MQConstants.MQRC_NO_MSG_AVAILABLE + while (true) { + try { + MQGetMessageOptions getOptions = new MQGetMessageOptions(); + getOptions.options = MQConstants.MQGMO_NO_WAIT | MQConstants.MQGMO_FAIL_IF_QUIESCING; + if (!consumeEvents) { + getOptions.options |= MQConstants.MQGMO_BROWSE_NEXT; + } + MQMessage message = new MQMessage(); + + queue.get(message, getOptions); + PCFMessage received = new PCFMessage(message); + if (received.getMsgSeqNumber() > maxSequenceNumber) { + maxSequenceNumber = received.getMsgSeqNumber(); + candidate = received; + } + + } catch (MQException e) { + if (e.reasonCode != MQConstants.MQRC_NO_MSG_AVAILABLE) { + logger.error(e.getMessage(), e); + } + break; + } catch (IOException e) { + logger.error(e.getMessage(), e); + break; + } + } + } finally { + if (queue != null) { + queue.close(); + } + } + return candidate; + } + + @Override + public void accept(MetricsCollectorContext context) { + long entryTime = System.currentTimeMillis(); + String 
configurationQueueName = context.getQueueManager().getConfigurationQueueName(); + logger.info( + "sending PCF agent request to read configuration events from queue {}", + configurationQueueName); + try { + + PCFMessage candidate = findLastUpdate(context, entryTime, configurationQueueName); + + if (candidate == null) { + if (context.getQueueManager().isRefreshQueueManagerConfigurationEnabled()) { + // no event found. + // we issue a refresh request, which will generate a configuration event on the + // configuration event queue. + // note this may incur a performance cost to the queue manager. + PCFMessage request = new PCFMessage(CMQCFC.MQCMD_REFRESH_Q_MGR); + request.addParameter(CMQCFC.MQIACF_REFRESH_TYPE, CMQCFC.MQRT_CONFIGURATION); + request.addParameter(CMQCFC.MQIACF_OBJECT_TYPE, CMQC.MQOT_Q_MGR); + context.send(request); + // try again: + candidate = findLastUpdate(context, entryTime, configurationQueueName); + } + } + + if (candidate != null) { + if (context.getMetricsConfig().isIbmMqManagerMaxHandlesEnabled()) { + int maxHandles = candidate.getIntParameterValue(CMQC.MQIA_MAX_HANDLES); + maxHandlesGauge.set( + maxHandles, Attributes.of(IBM_MQ_QUEUE_MANAGER, context.getQueueManager().getName())); + } + } + + } catch (Exception e) { + logger.error( + "Unexpected error occurred while collecting configuration events for queue " + + configurationQueueName, + e); + } + long exitTime = System.currentTimeMillis() - entryTime; + logger.debug( + "Time taken to publish metrics for configuration events is {} milliseconds", exitTime); + } +} diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/ResetQStatsCmdCollector.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/ResetQStatsCmdCollector.java new file mode 100644 index 000000000..6f75acfe2 --- /dev/null +++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/ResetQStatsCmdCollector.java @@ -0,0 +1,56 @@ +/* + * Copyright The OpenTelemetry 
Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.metricscollector; + +import com.ibm.mq.constants.CMQC; +import com.ibm.mq.constants.CMQCFC; +import com.ibm.mq.headers.pcf.PCFMessage; +import java.util.Arrays; +import java.util.Set; +import java.util.function.Consumer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +final class ResetQStatsCmdCollector implements Consumer { + + static final int[] ATTRIBUTES = + new int[] {CMQC.MQIA_HIGH_Q_DEPTH, CMQC.MQIA_MSG_DEQ_COUNT, CMQC.MQIA_MSG_ENQ_COUNT}; + + private static final Logger logger = LoggerFactory.getLogger(ResetQStatsCmdCollector.class); + + static final String COMMAND = "MQCMD_RESET_Q_STATS"; + private final QueueCollectionBuddy queueBuddy; + + ResetQStatsCmdCollector(QueueCollectionBuddy queueBuddy) { + this.queueBuddy = queueBuddy; + } + + @Override + public void accept(MetricsCollectorContext context) { + logger.info("Collecting metrics for command {}", COMMAND); + long entryTime = System.currentTimeMillis(); + + logger.debug( + "Attributes being sent along PCF agent request to query queue metrics: {} for command {}", + Arrays.toString(ATTRIBUTES), + COMMAND); + + Set queueGenericNames = context.getQueueIncludeFilterNames(); + for (String queueGenericName : queueGenericNames) { + // list of all metrics extracted through MQCMD_RESET_Q_STATS is mentioned here + // https://www.ibm.com/support/knowledgecenter/SSFKSJ_8.0.0/com.ibm.mq.ref.adm.doc/q088310_.htm + PCFMessage request = new PCFMessage(CMQCFC.MQCMD_RESET_Q_STATS); + request.addParameter(CMQC.MQCA_Q_NAME, queueGenericName); + queueBuddy.processPcfRequestAndPublishQMetrics( + context, request, queueGenericName, ATTRIBUTES); + } + long exitTime = System.currentTimeMillis() - entryTime; + logger.debug( + "Time taken to publish metrics for all queues is {} milliseconds for command {}", + exitTime, + COMMAND); + } +} diff --git 
a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/TopicMetricsCollector.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/TopicMetricsCollector.java new file mode 100644 index 000000000..76ff2a957 --- /dev/null +++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/metricscollector/TopicMetricsCollector.java @@ -0,0 +1,26 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.metricscollector; + +import io.opentelemetry.api.metrics.Meter; +import java.util.function.Consumer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public final class TopicMetricsCollector implements Consumer { + private static final Logger logger = LoggerFactory.getLogger(TopicMetricsCollector.class); + private final InquireTStatusCmdCollector inquireTStatusCmdCollector; + + public TopicMetricsCollector(Meter meter) { + this.inquireTStatusCmdCollector = new InquireTStatusCmdCollector(meter); + } + + @Override + public void accept(MetricsCollectorContext context) { + logger.info("Collecting Topic metrics..."); + inquireTStatusCmdCollector.accept(context); + } +} diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/opentelemetry/Config.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/opentelemetry/Config.java new file mode 100644 index 000000000..68ede61c7 --- /dev/null +++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/opentelemetry/Config.java @@ -0,0 +1,83 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.opentelemetry; + +import java.util.Map; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** Utilities reading configuration and create domain objects */ +final class Config { + + private static final Logger logger = LoggerFactory.getLogger(Config.class); + + private Config() {} + + static void setUpSslConnection(Map config) { + 
getConfigValueAndSetSystemProperty(config, "keyStorePath", "javax.net.ssl.keyStore"); + getConfigValueAndSetSystemProperty( + config, "keyStorePassword", "javax.net.ssl.keyStorePassword"); + getConfigValueAndSetSystemProperty(config, "trustStorePath", "javax.net.ssl.trustStorePath"); + getConfigValueAndSetSystemProperty( + config, "trustStorePassword", "javax.net.ssl.trustStorePassword"); + } + + private static void getConfigValueAndSetSystemProperty( + Map otlpConfig, String configKey, String systemKey) { + Object configValue = otlpConfig.get(configKey); + if (configValue instanceof String && !((String) configValue).trim().isEmpty()) { + System.setProperty(systemKey, (String) configValue); + } + } + + static void configureSecurity(ConfigWrapper config) { + Map sslConnection = config.getSslConnection(); + if (sslConnection.isEmpty()) { + logger.debug( + "ssl truststore and keystore are not configured in config.yml, if SSL is enabled, pass them as jvm args"); + return; + } + + configureTrustStore(sslConnection); + configureKeyStore(sslConnection); + } + + private static void configureTrustStore(Map sslConnection) { + String trustStorePath = sslConnection.get("trustStorePath"); + if (trustStorePath == null || trustStorePath.isEmpty()) { + logger.debug( + "trustStorePath is not set in config.yml, ignoring setting trustStorePath as system property"); + return; + } + + System.setProperty("javax.net.ssl.trustStore", trustStorePath); + logger.debug("System property set for javax.net.ssl.trustStore is {}", trustStorePath); + + String trustStorePassword = sslConnection.get("trustStorePassword"); + + if (trustStorePassword != null && !trustStorePassword.isEmpty()) { + System.setProperty("javax.net.ssl.trustStorePassword", trustStorePassword); + logger.debug("System property set for javax.net.ssl.trustStorePassword is xxxxx"); + } + } + + private static void configureKeyStore(Map sslConnection) { + String keyStorePath = sslConnection.get("keyStorePath"); + if (keyStorePath == 
null || keyStorePath.isEmpty()) { + logger.debug( + "keyStorePath is not set in config.yml, ignoring setting keyStorePath as system property"); + return; + } + + System.setProperty("javax.net.ssl.keyStore", keyStorePath); + logger.debug("System property set for javax.net.ssl.keyStore is {}", keyStorePath); + String keyStorePassword = sslConnection.get("keyStorePassword"); + if (keyStorePassword != null && !keyStorePassword.isEmpty()) { + System.setProperty("javax.net.ssl.keyStorePassword", keyStorePassword); + logger.debug("System property set for javax.net.ssl.keyStorePassword is xxxxx"); + } + } +} diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/opentelemetry/ConfigWrapper.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/opentelemetry/ConfigWrapper.java new file mode 100644 index 000000000..5f1d71e65 --- /dev/null +++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/opentelemetry/ConfigWrapper.java @@ -0,0 +1,129 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.opentelemetry; + +import static java.util.Collections.emptyList; + +import java.io.IOException; +import java.nio.charset.Charset; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.time.Duration; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import org.jetbrains.annotations.NotNull; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.yaml.snakeyaml.Yaml; + +/** Low-fi domain-specific yaml wrapper. 
*/ +public final class ConfigWrapper { + + private static final Logger logger = LoggerFactory.getLogger(ConfigWrapper.class); + + private static final int DEFAULT_THREADS = 20; + private static final int DEFAULT_DELAY_SECONDS = 60; + private static final int DEFAULT_INITIAL_DELAY = 0; + + private final Map config; + + private ConfigWrapper(Map config) { + this.config = config; + } + + public static ConfigWrapper parse(String configFile) throws IOException { + Yaml yaml = new Yaml(); + Map config = + yaml.load(Files.newBufferedReader(Paths.get(configFile), Charset.defaultCharset())); + return new ConfigWrapper(config); + } + + public int getNumberOfThreads() { + int value = defaultedInt(getTaskSchedule(), "numberOfThreads", DEFAULT_THREADS); + if (value < DEFAULT_THREADS) { + logger.warn( + "numberOfThreads {} is less than the minimum number of threads allowed. Using {} instead.", + value, + DEFAULT_THREADS); + value = DEFAULT_THREADS; + } + return value; + } + + int getTaskDelaySeconds() { + return defaultedInt(getTaskSchedule(), "taskDelaySeconds", DEFAULT_DELAY_SECONDS); + } + + Duration getTaskDelay() { + return Duration.ofSeconds(getTaskDelaySeconds()); + } + + int getTaskInitialDelaySeconds() { + return defaultedInt(getTaskSchedule(), "initialDelaySeconds", DEFAULT_INITIAL_DELAY); + } + + @NotNull + @SuppressWarnings("unchecked") + List getQueueManagerNames() { + return getQueueManagers().stream() + .map(o -> (Map) o) + .map(x -> x.get("name")) + .collect(Collectors.toList()); + } + + @NotNull + @SuppressWarnings("unchecked") + public List> getQueueManagers() { + List> result = (List>) config.get("queueManagers"); + if (result == null) { + return emptyList(); + } + return result; + } + + @NotNull + @SuppressWarnings("unchecked") + public Map getSslConnection() { + Map result = (Map) config.get("sslConnection"); + if (result == null) { + return Collections.emptyMap(); + } + return result; + } + + public int getInt(String key, int defaultValue) { + Object result 
= config.get(key); + if (result == null) { + return defaultValue; + } + return (Integer) result; + } + + @NotNull + @SuppressWarnings("unchecked") + public Map getMetrics() { + Object metrics = config.get("metrics"); + if (!(metrics instanceof Map)) { + throw new IllegalArgumentException("config metrics section is missing"); + } + return (Map) metrics; + } + + private static int defaultedInt(Map section, String key, int defaultValue) { + Object val = section.get(key); + return val instanceof Integer ? (Integer) val : defaultValue; + } + + @SuppressWarnings("unchecked") + private Map getTaskSchedule() { + if (config.get("taskSchedule") instanceof Map) { + return (Map) config.get("taskSchedule"); + } + return Collections.emptyMap(); + } +} diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/opentelemetry/Main.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/opentelemetry/Main.java new file mode 100644 index 000000000..abd7e69fb --- /dev/null +++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/opentelemetry/Main.java @@ -0,0 +1,87 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.opentelemetry; + +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.ibm.mq.WmqMonitor; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.autoconfigure.AutoConfiguredOpenTelemetrySdk; +import io.opentelemetry.sdk.resources.Resource; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import org.jetbrains.annotations.VisibleForTesting; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings("SystemOut") +public final class Main { + + private static final Logger logger = LoggerFactory.getLogger(Main.class); + + private Main() {} + + public static void main(String[] args) throws Exception { + if 
(args.length == 0) { + System.err.println("Usage: Main "); + System.exit(1); + } + + try { + Class.forName("com.ibm.mq.headers.MQDataException"); + } catch (ClassNotFoundException e) { + System.err.println("IBM MQ jar is missing from classpath."); + System.exit(1); + } + + String configFile = args[0]; + + ConfigWrapper config = ConfigWrapper.parse(configFile); + + Thread.UncaughtExceptionHandler handler = + (t, e) -> logger.error("Unhandled exception in thread pool", e); + logger.debug("Initializing thread pool with {} threads", config.getNumberOfThreads()); + ScheduledExecutorService service = + Executors.newScheduledThreadPool( + config.getNumberOfThreads(), + r -> { + Thread thread = new Thread(r); + thread.setUncaughtExceptionHandler(handler); + return thread; + }); + + Config.configureSecurity(config); + Config.setUpSslConnection(config.getSslConnection()); + + run(config, service); + } + + public static void run(ConfigWrapper config, ScheduledExecutorService service) { + + AutoConfiguredOpenTelemetrySdk sdk = + AutoConfiguredOpenTelemetrySdk.builder() + .addMeterProviderCustomizer( + (builder, configProps) -> builder.setResource(Resource.empty())) + .build(); + + OpenTelemetrySdk otel = sdk.getOpenTelemetrySdk(); + + run(config, service, otel); + } + + @VisibleForTesting + public static void run( + ConfigWrapper config, ScheduledExecutorService service, OpenTelemetry otel) { + WmqMonitor monitor = new WmqMonitor(config, service, otel.getMeter("websphere/mq")); + ScheduledFuture unused = + service.scheduleAtFixedRate( + monitor::run, + config.getTaskInitialDelaySeconds(), + config.getTaskDelaySeconds(), + TimeUnit.SECONDS); + } +} diff --git a/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/util/WmqUtil.java b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/util/WmqUtil.java new file mode 100644 index 000000000..cf633b953 --- /dev/null +++ b/ibm-mq-metrics/src/main/java/io/opentelemetry/ibm/mq/util/WmqUtil.java @@ -0,0 +1,78 @@ +/* + * Copyright The 
OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.util; + +import com.ibm.mq.MQException; +import com.ibm.mq.MQQueueManager; +import com.ibm.mq.headers.MQDataException; +import com.ibm.mq.headers.pcf.PCFMessageAgent; +import io.opentelemetry.ibm.mq.WmqContext; +import io.opentelemetry.ibm.mq.config.QueueManager; +import java.util.Hashtable; +import javax.annotation.Nullable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public final class WmqUtil { + + private static final Logger logger = LoggerFactory.getLogger(WmqUtil.class); + + private WmqUtil() {} + + public static PCFMessageAgent initPcfMessageAgent( + QueueManager queueManager, MQQueueManager ibmQueueManager) { + try { + PCFMessageAgent agent; + if (isNotNullOrEmpty(queueManager.getModelQueueName()) + && isNotNullOrEmpty(queueManager.getReplyQueuePrefix())) { + logger.debug("Initializing the PCF agent for model queue and reply queue prefix."); + agent = new PCFMessageAgent(); + agent.setModelQueueName(queueManager.getModelQueueName()); + agent.setReplyQueuePrefix(queueManager.getReplyQueuePrefix()); + logger.debug("Connecting to queueManager to set the modelQueueName and replyQueuePrefix."); + agent.connect(ibmQueueManager); + } else { + agent = new PCFMessageAgent(ibmQueueManager); + } + if (queueManager.getCcsid() != Integer.MIN_VALUE) { + agent.setCharacterSet(queueManager.getCcsid()); + } + + if (queueManager.getEncoding() != Integer.MIN_VALUE) { + agent.setEncoding(queueManager.getEncoding()); + } + logger.debug( + "Initialized PCFMessageAgent for queueManager {} in thread {}", + agent.getQManagerName(), + Thread.currentThread().getName()); + return agent; + } catch (MQDataException mqe) { + logger.error(mqe.getMessage(), mqe); + throw new IllegalStateException(mqe); + } + } + + @SuppressWarnings("rawtypes") + public static MQQueueManager connectToQueueManager(QueueManager queueManager) { + WmqContext auth = new 
WmqContext(queueManager); + Hashtable env = auth.getMqEnvironment(); + try { + MQQueueManager ibmQueueManager = new MQQueueManager(queueManager.getName(), env); + logger.debug( + "MQQueueManager connection initiated for queueManager {} in thread {}", + queueManager.getName(), + Thread.currentThread().getName()); + return ibmQueueManager; + } catch (MQException mqe) { + logger.error(mqe.getMessage(), mqe); + throw new IllegalStateException(mqe.getMessage()); + } + } + + private static boolean isNotNullOrEmpty(@Nullable String str) { + return str != null && !str.isEmpty(); + } +} diff --git a/ibm-mq-metrics/src/test/java/io/opentelemetry/ibm/mq/metricscollector/ChannelMetricsCollectorTest.java b/ibm-mq-metrics/src/test/java/io/opentelemetry/ibm/mq/metricscollector/ChannelMetricsCollectorTest.java new file mode 100644 index 000000000..b4ae65e88 --- /dev/null +++ b/ibm-mq-metrics/src/test/java/io/opentelemetry/ibm/mq/metricscollector/ChannelMetricsCollectorTest.java @@ -0,0 +1,227 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.metricscollector; + +import static com.ibm.mq.constants.CMQC.MQRC_SELECTOR_ERROR; +import static com.ibm.mq.constants.CMQCFC.MQRCCF_CHL_STATUS_NOT_FOUND; +import static io.opentelemetry.ibm.mq.metricscollector.MetricAssert.assertThatMetric; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.params.provider.Arguments.arguments; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.ibm.mq.constants.CMQCFC; +import com.ibm.mq.headers.pcf.PCFException; +import com.ibm.mq.headers.pcf.PCFMessage; +import com.ibm.mq.headers.pcf.PCFMessageAgent; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.ibm.mq.config.QueueManager; +import io.opentelemetry.ibm.mq.metrics.MetricsConfig; +import 
io.opentelemetry.ibm.mq.opentelemetry.ConfigWrapper; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.testing.junit5.OpenTelemetryExtension; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Stream; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.extension.RegisterExtension; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +class ChannelMetricsCollectorTest { + + @RegisterExtension + static final OpenTelemetryExtension otelTesting = OpenTelemetryExtension.create(); + + ChannelMetricsCollector classUnderTest; + QueueManager queueManager; + MetricsCollectorContext context; + Meter meter; + @Mock PCFMessageAgent pcfMessageAgent; + + @BeforeEach + void setup() throws Exception { + ConfigWrapper config = ConfigWrapper.parse("src/test/resources/conf/config.yml"); + ObjectMapper mapper = new ObjectMapper(); + queueManager = mapper.convertValue(config.getQueueManagers().get(0), QueueManager.class); + meter = otelTesting.getOpenTelemetry().getMeter("opentelemetry.io/mq"); + context = + new MetricsCollectorContext(queueManager, pcfMessageAgent, null, new MetricsConfig(config)); + } + + @Test + void testPublishMetrics() throws Exception { + when(pcfMessageAgent.send(any(PCFMessage.class))) + .thenReturn(createPCFResponseForInquireChannelStatusCmd()); + classUnderTest = new ChannelMetricsCollector(meter); + + classUnderTest.accept(context); + + List metricsList = + new ArrayList<>( + Arrays.asList( + "ibm.mq.message.count", + "ibm.mq.status", + "ibm.mq.byte.sent", + "ibm.mq.byte.received", + "ibm.mq.buffers.sent", + "ibm.mq.buffers.received")); + + for 
(MetricData metric : otelTesting.getMetrics()) { + if (metricsList.remove(metric.getName())) { + if (metric.getName().equals("ibm.mq.message.count")) { + assertThat(metric.getLongGaugeData().getPoints().iterator().next().getValue()) + .isEqualTo(17); + } + + if (metric.getName().equals("ibm.mq.status")) { + assertThat(metric.getLongGaugeData().getPoints().iterator().next().getValue()) + .isEqualTo(3); + } + if (metric.getName().equals("ibm.mq.byte.sent")) { + assertThat(metric.getLongGaugeData().getPoints().iterator().next().getValue()) + .isEqualTo(6984); + } + if (metric.getName().equals("ibm.mq.byte.received")) { + assertThat(metric.getLongGaugeData().getPoints().iterator().next().getValue()) + .isEqualTo(5772); + } + if (metric.getName().equals("ibm.mq.buffers.sent")) { + assertThat(metric.getLongGaugeData().getPoints().iterator().next().getValue()) + .isEqualTo(19); + } + if (metric.getName().equals("ibm.mq.buffers.received")) { + assertThat(metric.getLongGaugeData().getPoints().iterator().next().getValue()) + .isEqualTo(20); + } + } + } + assertThat(metricsList).isEmpty(); + } + + /* + Request + PCFMessage: + MQCFH [type: 1, strucLength: 36, version: 1, command: 42 (MQCMD_INQUIRE_CHANNEL_STATUS), msgSeqNumber: 1, control: 1, compCode: 0, reason: 0, parameterCount: 3] + MQCFST [type: 4, strucLength: 24, parameter: 3501 (MQCACH_FIRST/MQCACH_CHANNEL_NAME), codedCharSetId: 0, stringLength: 1, string: *] + MQCFIN [type: 3, strucLength: 16, parameter: 1523 (MQIACH_CHANNEL_INSTANCE_TYPE), value: 1011] + MQCFIL [type: 5, strucLength: 48, parameter: 1524 (MQIACH_CHANNEL_INSTANCE_ATTRS), count: 8, values: {3501, 3506, 1527, 1534, 1538, 1535, 1539, 1536}] + + Response + PCFMessage: + MQCFH [type: 2, strucLength: 36, version: 1, command: 42 (MQCMD_INQUIRE_CHANNEL_STATUS), msgSeqNumber: 1, control: 1, compCode: 0, reason: 0, parameterCount: 11] + MQCFST [type: 4, strucLength: 40, parameter: 3501 (MQCACH_FIRST/MQCACH_CHANNEL_NAME), codedCharSetId: 819, stringLength: 20, 
string: DEV.ADMIN.SVRCONN ] + MQCFIN [type: 3, strucLength: 16, parameter: 1511 (MQIACH_CHANNEL_TYPE), value: 7] + MQCFIN [type: 3, strucLength: 16, parameter: 1539 (MQIACH_BUFFERS_RCVD/MQIACH_BUFFERS_RECEIVED), value: 20] + MQCFIN [type: 3, strucLength: 16, parameter: 1538 (MQIACH_BUFFERS_SENT), value: 19] + MQCFIN [type: 3, strucLength: 16, parameter: 1536 (MQIACH_BYTES_RCVD/MQIACH_BYTES_RECEIVED), value: 5772] + MQCFIN [type: 3, strucLength: 16, parameter: 1535 (MQIACH_BYTES_SENT), value: 6984] + MQCFST [type: 4, strucLength: 284, parameter: 3506 (MQCACH_CONNECTION_NAME), codedCharSetId: 819, stringLength: 264, string: 172.17.0.1] + MQCFIN [type: 3, strucLength: 16, parameter: 1523 (MQIACH_CHANNEL_INSTANCE_TYPE), value: 1011] + MQCFIN [type: 3, strucLength: 16, parameter: 1534 (MQIACH_MSGS), value: 17] + MQCFIN [type: 3, strucLength: 16, parameter: 1527 (MQIACH_CHANNEL_STATUS), value: 3] + MQCFIN [type: 3, strucLength: 16, parameter: 1609 (MQIACH_CHANNEL_SUBSTATE), value: 300] + */ + + private static PCFMessage[] createPCFResponseForInquireChannelStatusCmd() { + PCFMessage response1 = new PCFMessage(2, CMQCFC.MQCMD_INQUIRE_CHANNEL_STATUS, 1, true); + response1.addParameter(CMQCFC.MQCACH_CHANNEL_NAME, "DEV.ADMIN.SVRCONN"); + response1.addParameter(CMQCFC.MQIACH_CHANNEL_TYPE, 7); + response1.addParameter(CMQCFC.MQIACH_BUFFERS_RECEIVED, 20); + response1.addParameter(CMQCFC.MQIACH_BUFFERS_SENT, 19); + response1.addParameter(CMQCFC.MQIACH_BYTES_RECEIVED, 5772); + response1.addParameter(CMQCFC.MQIACH_BYTES_SENT, 6984); + response1.addParameter(CMQCFC.MQCACH_CONNECTION_NAME, "172.17.0.1 "); + response1.addParameter(CMQCFC.MQIACH_CHANNEL_INSTANCE_TYPE, 1011); + response1.addParameter(CMQCFC.MQIACH_MSGS, 17); + response1.addParameter(CMQCFC.MQIACH_CHANNEL_STATUS, 3); + response1.addParameter(CMQCFC.MQIACH_CHANNEL_SUBSTATE, 300); + response1.addParameter(CMQCFC.MQCACH_CHANNEL_START_DATE, "2012-01-03"); + response1.addParameter(CMQCFC.MQCACH_CHANNEL_START_TIME, 
"22.33.44"); + response1.addParameter(CMQCFC.MQCACH_MCA_JOB_NAME, "000042040000000C"); + + PCFMessage response2 = new PCFMessage(2, CMQCFC.MQCMD_INQUIRE_CHANNEL_STATUS, 2, true); + response2.addParameter(CMQCFC.MQCACH_CHANNEL_NAME, "DEV.APP.SVRCONN"); + response2.addParameter(CMQCFC.MQIACH_CHANNEL_TYPE, 7); + response2.addParameter(CMQCFC.MQIACH_BUFFERS_RECEIVED, 20); + response2.addParameter(CMQCFC.MQIACH_BUFFERS_SENT, 19); + response2.addParameter(CMQCFC.MQIACH_BYTES_RECEIVED, 5772); + response2.addParameter(CMQCFC.MQIACH_BYTES_SENT, 6984); + response2.addParameter(CMQCFC.MQCACH_CONNECTION_NAME, "172.17.0.2 "); + response2.addParameter(CMQCFC.MQIACH_CHANNEL_INSTANCE_TYPE, 1011); + response2.addParameter(CMQCFC.MQIACH_MSGS, 17); + response2.addParameter(CMQCFC.MQIACH_CHANNEL_STATUS, 3); + response2.addParameter(CMQCFC.MQIACH_CHANNEL_SUBSTATE, 300); + response2.addParameter(CMQCFC.MQCACH_CHANNEL_START_DATE, "2012-01-04"); + response2.addParameter(CMQCFC.MQCACH_CHANNEL_START_TIME, "22.33.45"); + response2.addParameter(CMQCFC.MQCACH_MCA_JOB_NAME, "000042040000000D"); + + PCFMessage response3 = new PCFMessage(2, CMQCFC.MQCMD_INQUIRE_CHANNEL_STATUS, 2, true); + response3.addParameter(CMQCFC.MQCACH_CHANNEL_NAME, "TEST.APP.SVRCONN"); + response3.addParameter(CMQCFC.MQIACH_CHANNEL_TYPE, 7); + response3.addParameter(CMQCFC.MQIACH_BUFFERS_RECEIVED, 20); + response3.addParameter(CMQCFC.MQIACH_BUFFERS_SENT, 19); + response3.addParameter(CMQCFC.MQIACH_BYTES_RECEIVED, 5772); + response3.addParameter(CMQCFC.MQIACH_BYTES_SENT, 6984); + response3.addParameter(CMQCFC.MQCACH_CONNECTION_NAME, "172.17.0.2 "); + response3.addParameter(CMQCFC.MQIACH_CHANNEL_INSTANCE_TYPE, 1011); + response3.addParameter(CMQCFC.MQIACH_MSGS, 17); + response3.addParameter(CMQCFC.MQIACH_CHANNEL_STATUS, 3); + response3.addParameter(CMQCFC.MQIACH_CHANNEL_SUBSTATE, 300); + response3.addParameter(CMQCFC.MQCACH_CHANNEL_START_DATE, "2012-01-05"); + response3.addParameter(CMQCFC.MQCACH_CHANNEL_START_TIME, 
"22.33.46"); + response3.addParameter(CMQCFC.MQCACH_MCA_JOB_NAME, "000042040000000E"); + + return new PCFMessage[] {response1, response2, response3}; + } + + @Test + void testPublishMetrics_nullResponse() throws Exception { + when(pcfMessageAgent.send(any(PCFMessage.class))).thenReturn(null); + classUnderTest = new ChannelMetricsCollector(meter); + + classUnderTest.accept(context); + assertThat(otelTesting.getMetrics()).isEmpty(); + } + + @Test + void testPublishMetrics_emptyResponse() throws Exception { + when(pcfMessageAgent.send(any(PCFMessage.class))).thenReturn(new PCFMessage[] {}); + classUnderTest = new ChannelMetricsCollector(meter); + + classUnderTest.accept(context); + assertThat(otelTesting.getMetrics()).isEmpty(); + } + + @ParameterizedTest + @MethodSource("exceptionsToThrow") + void testPublishMetrics_pfException(Exception exceptionToThrow) throws Exception { + when(pcfMessageAgent.send(any(PCFMessage.class))).thenThrow(exceptionToThrow); + classUnderTest = new ChannelMetricsCollector(meter); + + classUnderTest.accept(context); + + List exported = otelTesting.getMetrics(); + assertThat(exported.get(0).getLongGaugeData().getPoints()).hasSize(1); + assertThatMetric(exported.get(0), 0).hasName("ibm.mq.manager.active.channels").hasValue(0); + } + + static Stream exceptionsToThrow() { + return Stream.of( + arguments(new RuntimeException("KBAOOM")), + arguments(new PCFException(91, MQRCCF_CHL_STATUS_NOT_FOUND, "flimflam")), + arguments(new PCFException(4, MQRC_SELECTOR_ERROR, "shazbot")), + arguments(new PCFException(4, 42, "boz"))); + } +} diff --git a/ibm-mq-metrics/src/test/java/io/opentelemetry/ibm/mq/metricscollector/InquireChannelCmdCollectorTest.java b/ibm-mq-metrics/src/test/java/io/opentelemetry/ibm/mq/metricscollector/InquireChannelCmdCollectorTest.java new file mode 100644 index 000000000..b9964537f --- /dev/null +++ b/ibm-mq-metrics/src/test/java/io/opentelemetry/ibm/mq/metricscollector/InquireChannelCmdCollectorTest.java @@ -0,0 +1,99 @@ +/* + * 
Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.metricscollector; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.ibm.mq.constants.CMQCFC; +import com.ibm.mq.constants.CMQXC; +import com.ibm.mq.headers.pcf.PCFMessage; +import com.ibm.mq.headers.pcf.PCFMessageAgent; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.ibm.mq.config.QueueManager; +import io.opentelemetry.ibm.mq.metrics.MetricsConfig; +import io.opentelemetry.ibm.mq.opentelemetry.ConfigWrapper; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.testing.junit5.OpenTelemetryExtension; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.extension.RegisterExtension; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +class InquireChannelCmdCollectorTest { + + @RegisterExtension + static final OpenTelemetryExtension otelTesting = OpenTelemetryExtension.create(); + + InquireChannelCmdCollector classUnderTest; + + MetricsCollectorContext context; + Meter meter; + @Mock PCFMessageAgent pcfMessageAgent; + + @BeforeEach + public void setup() throws Exception { + ConfigWrapper config = ConfigWrapper.parse("src/test/resources/conf/config.yml"); + ObjectMapper mapper = new ObjectMapper(); + QueueManager queueManager = + mapper.convertValue(config.getQueueManagers().get(0), QueueManager.class); + meter = otelTesting.getOpenTelemetry().getMeter("opentelemetry.io/mq"); + context = + new MetricsCollectorContext(queueManager, pcfMessageAgent, null, new MetricsConfig(config)); + } + + @Test 
+ void testProcessPCFRequestAndPublishQMetricsForInquireQStatusCmd() throws Exception { + when(pcfMessageAgent.send(any(PCFMessage.class))) + .thenReturn(createPCFResponseForInquireChannelCmd()); + classUnderTest = new InquireChannelCmdCollector(meter); + classUnderTest.accept(context); + List metricsList = + new ArrayList<>( + Arrays.asList( + "ibm.mq.message.retry.count", + "ibm.mq.message.received.count", + "ibm.mq.message.sent.count")); + for (MetricData metric : otelTesting.getMetrics()) { + if (metricsList.remove(metric.getName())) { + if (metric.getName().equals("ibm.mq.message.retry.count")) { + assertThat(metric.getLongGaugeData().getPoints().iterator().next().getValue()) + .isEqualTo(22); + } + if (metric.getName().equals("ibm.mq.message.received.count")) { + assertThat(metric.getLongGaugeData().getPoints().iterator().next().getValue()) + .isEqualTo(42); + } + if (metric.getName().equals("ibm.mq.message.sent.count")) { + assertThat(metric.getLongGaugeData().getPoints().iterator().next().getValue()) + .isEqualTo(64); + } + } + } + assertThat(metricsList).isEmpty(); + } + + private static PCFMessage[] createPCFResponseForInquireChannelCmd() { + PCFMessage response1 = new PCFMessage(2, CMQCFC.MQCMD_INQUIRE_CHANNEL, 1, true); + response1.addParameter(CMQCFC.MQCACH_CHANNEL_NAME, "my.channel"); + response1.addParameter(CMQCFC.MQIACH_CHANNEL_TYPE, CMQXC.MQCHT_SVRCONN); + response1.addParameter(CMQCFC.MQIACH_MR_COUNT, 22); + response1.addParameter(CMQCFC.MQIACH_MSGS_RECEIVED, 42); + response1.addParameter(CMQCFC.MQIACH_MSGS_SENT, 64); + response1.addParameter(CMQCFC.MQIACH_MAX_INSTANCES, 3); + response1.addParameter(CMQCFC.MQIACH_MAX_INSTS_PER_CLIENT, 3); + + return new PCFMessage[] {response1}; + } +} diff --git a/ibm-mq-metrics/src/test/java/io/opentelemetry/ibm/mq/metricscollector/ListenerMetricsCollectorTest.java b/ibm-mq-metrics/src/test/java/io/opentelemetry/ibm/mq/metricscollector/ListenerMetricsCollectorTest.java new file mode 100644 index 
000000000..ea46450d3 --- /dev/null +++ b/ibm-mq-metrics/src/test/java/io/opentelemetry/ibm/mq/metricscollector/ListenerMetricsCollectorTest.java @@ -0,0 +1,102 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.metricscollector; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.ibm.mq.constants.CMQCFC; +import com.ibm.mq.headers.pcf.PCFMessage; +import com.ibm.mq.headers.pcf.PCFMessageAgent; +import io.opentelemetry.ibm.mq.config.QueueManager; +import io.opentelemetry.ibm.mq.metrics.MetricsConfig; +import io.opentelemetry.ibm.mq.opentelemetry.ConfigWrapper; +import io.opentelemetry.sdk.metrics.data.LongPointData; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.testing.junit5.OpenTelemetryExtension; +import java.util.HashSet; +import java.util.Set; +import java.util.stream.Collectors; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.extension.RegisterExtension; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +class ListenerMetricsCollectorTest { + @RegisterExtension + static final OpenTelemetryExtension otelTesting = OpenTelemetryExtension.create(); + + ListenerMetricsCollector classUnderTest; + QueueManager queueManager; + ConfigWrapper config; + @Mock private PCFMessageAgent pcfMessageAgent; + + @BeforeEach + public void setup() throws Exception { + config = ConfigWrapper.parse("src/test/resources/conf/config.yml"); + ObjectMapper mapper = new ObjectMapper(); + queueManager = mapper.convertValue(config.getQueueManagers().get(0), QueueManager.class); + } + + @Test + void testPublishMetrics() throws Exception { + 
when(pcfMessageAgent.send(any(PCFMessage.class))) + .thenReturn(createPCFResponseForInquireListenerStatusCmd()); + + MetricsCollectorContext context = + new MetricsCollectorContext(queueManager, pcfMessageAgent, null, new MetricsConfig(config)); + classUnderTest = + new ListenerMetricsCollector( + otelTesting.getOpenTelemetry().getMeter("opentelemetry.io/mq")); + classUnderTest.accept(context); + + MetricData metric = otelTesting.getMetrics().get(0); + assertThat(metric.getName()).isEqualTo("ibm.mq.listener.status"); + Set values = new HashSet<>(); + values.add(2L); + values.add(3L); + assertThat( + metric.getLongGaugeData().getPoints().stream() + .map(LongPointData::getValue) + .collect(Collectors.toSet())) + .isEqualTo(values); + } + + /* + Request + PCFMessage: + MQCFH [type: 1, strucLength: 36, version: 1, command: 98 (MQCMD_INQUIRE_LISTENER_STATUS), msgSeqNumber: 1, control: 1, compCode: 0, reason: 0, parameterCount: 2] + MQCFST [type: 4, strucLength: 24, parameter: 3554 (MQCACH_LISTENER_NAME), codedCharSetId: 0, stringLength: 1, string: *] + MQCFIL [type: 5, strucLength: 24, parameter: 1223 (MQIACF_LISTENER_STATUS_ATTRS), count: 2, values: {3554, 1599}] + + Response + PCFMessage: + MQCFH [type: 2, strucLength: 36, version: 1, command: 98 (MQCMD_INQUIRE_LISTENER_STATUS), msgSeqNumber: 1, control: 1, compCode: 0, reason: 0, parameterCount: 2] + MQCFST [type: 4, strucLength: 48, parameter: 3554 (MQCACH_LISTENER_NAME), codedCharSetId: 819, stringLength: 27, string: SYSTEM.DEFAULT.LISTENER.TCP] + MQCFIN [type: 3, strucLength: 16, parameter: 1599 (MQIACH_LISTENER_STATUS), value: 2] + */ + + private static PCFMessage[] createPCFResponseForInquireListenerStatusCmd() { + PCFMessage response1 = new PCFMessage(2, CMQCFC.MQCMD_INQUIRE_LISTENER_STATUS, 1, true); + response1.addParameter(CMQCFC.MQCACH_LISTENER_NAME, "DEV.DEFAULT.LISTENER.TCP"); + response1.addParameter(CMQCFC.MQIACH_LISTENER_STATUS, 2); + + PCFMessage response2 = new PCFMessage(2, 
CMQCFC.MQCMD_INQUIRE_LISTENER_STATUS, 2, true); + response2.addParameter(CMQCFC.MQCACH_LISTENER_NAME, "DEV.LISTENER.TCP"); + response2.addParameter(CMQCFC.MQIACH_LISTENER_STATUS, 3); + + PCFMessage response3 = new PCFMessage(2, CMQCFC.MQCMD_INQUIRE_LISTENER_STATUS, 3, true); + response3.addParameter(CMQCFC.MQCACH_LISTENER_NAME, "SYSTEM.LISTENER.TCP"); + response3.addParameter(CMQCFC.MQIACH_LISTENER_STATUS, 1); + + return new PCFMessage[] {response1, response2, response3}; + } +} diff --git a/ibm-mq-metrics/src/test/java/io/opentelemetry/ibm/mq/metricscollector/MetricAssert.java b/ibm-mq-metrics/src/test/java/io/opentelemetry/ibm/mq/metricscollector/MetricAssert.java new file mode 100644 index 000000000..e6ddfae17 --- /dev/null +++ b/ibm-mq-metrics/src/test/java/io/opentelemetry/ibm/mq/metricscollector/MetricAssert.java @@ -0,0 +1,39 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.metricscollector; + +import static org.assertj.core.api.Assertions.assertThat; + +import io.opentelemetry.sdk.metrics.data.LongPointData; +import io.opentelemetry.sdk.metrics.data.MetricData; + +public class MetricAssert { + + private final MetricData metric; + private final int pointOffset; + + public MetricAssert(MetricData metric, int pointOffset) { + this.metric = metric; + this.pointOffset = pointOffset; + } + + static MetricAssert assertThatMetric(MetricData metric, int pointOffset) { + return new MetricAssert(metric, pointOffset); + } + + MetricAssert hasName(String name) { + assertThat(metric.getName()).isEqualTo(name); + return this; + } + + MetricAssert hasValue(long value) { + assertThat( + ((LongPointData) metric.getLongGaugeData().getPoints().toArray()[this.pointOffset]) + .getValue()) + .isEqualTo(value); + return this; + } +} diff --git a/ibm-mq-metrics/src/test/java/io/opentelemetry/ibm/mq/metricscollector/QueueCollectionBuddyTest.java 
b/ibm-mq-metrics/src/test/java/io/opentelemetry/ibm/mq/metricscollector/QueueCollectionBuddyTest.java new file mode 100644 index 000000000..5d18d73da --- /dev/null +++ b/ibm-mq-metrics/src/test/java/io/opentelemetry/ibm/mq/metricscollector/QueueCollectionBuddyTest.java @@ -0,0 +1,339 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.metricscollector; + +import static io.opentelemetry.ibm.mq.metrics.IbmMqAttributes.MESSAGING_DESTINATION_NAME; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableMap; +import com.ibm.mq.constants.CMQC; +import com.ibm.mq.constants.CMQCFC; +import com.ibm.mq.headers.pcf.PCFMessage; +import com.ibm.mq.headers.pcf.PCFMessageAgent; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.ibm.mq.config.QueueManager; +import io.opentelemetry.ibm.mq.metrics.MetricsConfig; +import io.opentelemetry.ibm.mq.opentelemetry.ConfigWrapper; +import io.opentelemetry.sdk.metrics.data.LongPointData; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.testing.junit5.OpenTelemetryExtension; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.extension.RegisterExtension; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +class QueueCollectionBuddyTest { + @RegisterExtension + static final OpenTelemetryExtension otelTesting = OpenTelemetryExtension.create(); + + QueueCollectionBuddy classUnderTest; + QueueManager queueManager; + MetricsCollectorContext collectorContext; + Meter meter; + @Mock private PCFMessageAgent pcfMessageAgent; + + @BeforeEach 
+ void setup() throws Exception { + ConfigWrapper config = ConfigWrapper.parse("src/test/resources/conf/config.yml"); + ObjectMapper mapper = new ObjectMapper(); + queueManager = mapper.convertValue(config.getQueueManagers().get(0), QueueManager.class); + meter = otelTesting.getOpenTelemetry().getMeter("opentelemetry.io/mq"); + collectorContext = + new MetricsCollectorContext(queueManager, pcfMessageAgent, null, new MetricsConfig(config)); + } + + @Test + void testProcessPcfRequestAndPublishQMetricsForInquireQStatusCmd() throws Exception { + QueueCollectorSharedState sharedState = new QueueCollectorSharedState(); + sharedState.putQueueType("AMQ.5AF1608820C7D76E", "local-transmission"); + sharedState.putQueueType("DEV.DEAD.LETTER.QUEUE", "local-transmission"); + sharedState.putQueueType("DEV.QUEUE.1", "local-transmission"); + PCFMessage request = createPCFRequestForInquireQStatusCmd(); + when(pcfMessageAgent.send(request)).thenReturn(createPCFResponseForInquireQStatusCmd()); + + classUnderTest = new QueueCollectionBuddy(meter, sharedState); + classUnderTest.processPcfRequestAndPublishQMetrics( + collectorContext, request, "*", InquireQStatusCmdCollector.ATTRIBUTES); + + Map> expectedValues = + new HashMap<>( + ImmutableMap.of( + "DEV.DEAD.LETTER.QUEUE", + new HashMap<>( + ImmutableMap.of( + "ibm.mq.oldest.msg.age", -1L, + "ibm.mq.uncommitted.messages", 0L, + "ibm.mq.onqtime.short_period", -1L, + "ibm.mq.onqtime.long_period", -1L, + "ibm.mq.queue.depth", 0L)), + "DEV.QUEUE.1", + new HashMap<>( + ImmutableMap.of( + "ibm.mq.oldest.msg.age", -1L, + "ibm.mq.uncommitted.messages", 10L, + "ibm.mq.onqtime.short_period", -1L, + "ibm.mq.onqtime.long_period", -1L, + "ibm.mq.queue.depth", 1L)))); + + for (MetricData metric : otelTesting.getMetrics()) { + for (LongPointData d : metric.getLongGaugeData().getPoints()) { + String queueName = d.getAttributes().get(MESSAGING_DESTINATION_NAME); + Long expectedValue = expectedValues.get(queueName).remove(metric.getName()); + 
assertThat(d.getValue()).isEqualTo(expectedValue); + } + } + + for (Map metrics : expectedValues.values()) { + assertThat(metrics).isEmpty(); + } + } + + @Test + void testProcessPcfRequestAndPublishQMetricsForInquireQCmd() throws Exception { + PCFMessage request = createPCFRequestForInquireQCmd(); + when(pcfMessageAgent.send(request)).thenReturn(createPCFResponseForInquireQCmd()); + classUnderTest = new QueueCollectionBuddy(meter, new QueueCollectorSharedState()); + classUnderTest.processPcfRequestAndPublishQMetrics( + collectorContext, request, "*", InquireQCmdCollector.ATTRIBUTES); + + Map> expectedValues = + new HashMap<>( + ImmutableMap.of( + "DEV.DEAD.LETTER.QUEUE", + new HashMap<>( + ImmutableMap.of( + "ibm.mq.queue.depth", 2L, + "ibm.mq.max.queue.depth", 5000L, + "ibm.mq.open.input.count", 2L, + "ibm.mq.open.output.count", 2L)), + "DEV.QUEUE.1", + new HashMap<>( + ImmutableMap.of( + "ibm.mq.queue.depth", 3L, + "ibm.mq.max.queue.depth", 5000L, + "ibm.mq.open.input.count", 3L, + "ibm.mq.open.output.count", 3L)))); + + for (MetricData metric : otelTesting.getMetrics()) { + for (LongPointData d : metric.getLongGaugeData().getPoints()) { + String queueName = d.getAttributes().get(MESSAGING_DESTINATION_NAME); + Long expectedValue = expectedValues.get(queueName).remove(metric.getName()); + assertThat(d.getValue()).isEqualTo(expectedValue); + } + } + + for (Map metrics : expectedValues.values()) { + assertThat(metrics).isEmpty(); + } + } + + @Test + void testProcessPcfRequestAndPublishQMetricsForResetQStatsCmd() throws Exception { + QueueCollectorSharedState sharedState = new QueueCollectorSharedState(); + sharedState.putQueueType("AMQ.5AF1608820C7D76E", "local-transmission"); + sharedState.putQueueType("DEV.DEAD.LETTER.QUEUE", "local-transmission"); + sharedState.putQueueType("DEV.QUEUE.1", "local-transmission"); + PCFMessage request = createPCFRequestForResetQStatsCmd(); + when(pcfMessageAgent.send(request)).thenReturn(createPCFResponseForResetQStatsCmd()); + 
classUnderTest = new QueueCollectionBuddy(meter, sharedState); + classUnderTest.processPcfRequestAndPublishQMetrics( + collectorContext, request, "*", ResetQStatsCmdCollector.ATTRIBUTES); + + for (MetricData metric : otelTesting.getMetrics()) { + Iterator iterator = metric.getLongGaugeData().getPoints().iterator(); + if (metric.getName().equals("ibm.mq.high.queue.depth")) { + assertThat(iterator.next().getValue()).isEqualTo(10); + } else if (metric.getName().equals("ibm.mq.message.deq.count")) { + assertThat(iterator.next().getValue()).isEqualTo(0); + } else if (metric.getName().equals("ibm.mq.message.enq.count")) { + assertThat(iterator.next().getValue()).isEqualTo(3); + } + } + } + + /* + PCFMessage: + MQCFH [type: 1, strucLength: 36, version: 1, command: 41 (MQCMD_INQUIRE_Q_STATUS), msgSeqNumber: 1, control: 1, compCode: 0, reason: 0, parameterCount: 2] + MQCFST [type: 4, strucLength: 24, parameter: 2016 (MQCA_Q_NAME), codedCharSetId: 0, stringLength: 1, string: *] + MQCFIL [type: 5, strucLength: 32, parameter: 1026 (MQIACF_Q_STATUS_ATTRS), count: 4, values: {2016, 1226, 1227, 1027}] + */ + private static PCFMessage createPCFRequestForInquireQStatusCmd() { + PCFMessage request = new PCFMessage(CMQCFC.MQCMD_INQUIRE_Q_STATUS); + request.addParameter(CMQC.MQCA_Q_NAME, "*"); + request.addParameter(CMQCFC.MQIACF_Q_STATUS_ATTRS, new int[] {2016, 1226, 1227, 1027}); + return request; + } + + /* + 0 = {PCFMessage@6026} "PCFMessage: + MQCFH [type: 2, strucLength: 36, version: 2, command: 41 (MQCMD_INQUIRE_Q_STATUS), msgSeqNumber: 1, control: 0, compCode: 0, reason: 0, parameterCount: 6] + MQCFST [type: 4, strucLength: 68, parameter: 2016 (MQCA_Q_NAME), codedCharSetId: 819, stringLength: 48, string: AMQ.5AF1608820C7D76E ] + MQCFIN [type: 3, strucLength: 16, parameter: 1103 (MQIACF_Q_STATUS_TYPE), value: 1105] + MQCFIN [type: 3, strucLength: 16, parameter: 3 (MQIA_CURRENT_Q_DEPTH), value: 12] + MQCFIN [type: 3, strucLength: 16, parameter: 1227 (MQIACF_OLDEST_MSG_AGE), 
value: -1] + MQCFIL [type: 5, strucLength: 24, parameter: 1226 (MQIACF_Q_TIME_INDICATOR), count: 2, values: {-1, -1}] + MQCFIN [type: 3, strucLength: 16, parameter: 1027 (MQIACF_UNCOMMITTED_MSGS), value: 0]" + + 1 = {PCFMessage@6029} "PCFMessage: + MQCFH [type: 2, strucLength: 36, version: 2, command: 41 (MQCMD_INQUIRE_Q_STATUS), msgSeqNumber: 2, control: 0, compCode: 0, reason: 0, parameterCount: 6] + MQCFST [type: 4, strucLength: 68, parameter: 2016 (MQCA_Q_NAME), codedCharSetId: 819, stringLength: 48, string: DEV.DEAD.LETTER.QUEUE ] + MQCFIN [type: 3, strucLength: 16, parameter: 1103 (MQIACF_Q_STATUS_TYPE), value: 1105] + MQCFIN [type: 3, strucLength: 16, parameter: 3 (MQIA_CURRENT_Q_DEPTH), value: 0] + MQCFIN [type: 3, strucLength: 16, parameter: 1227 (MQIACF_OLDEST_MSG_AGE), value: -1] + MQCFIL [type: 5, strucLength: 24, parameter: 1226 (MQIACF_Q_TIME_INDICATOR), count: 2, values: {-1, -1}] + MQCFIN [type: 3, strucLength: 16, parameter: 1027 (MQIACF_UNCOMMITTED_MSGS), value: 0]" + + 2 = {PCFMessage@6030} "PCFMessage: + MQCFH [type: 2, strucLength: 36, version: 2, command: 41 (MQCMD_INQUIRE_Q_STATUS), msgSeqNumber: 3, control: 0, compCode: 0, reason: 0, parameterCount: 6] + MQCFST [type: 4, strucLength: 68, parameter: 2016 (MQCA_Q_NAME), codedCharSetId: 819, stringLength: 48, string: DEV.QUEUE.1 ] + MQCFIN [type: 3, strucLength: 16, parameter: 1103 (MQIACF_Q_STATUS_TYPE), value: 1105] + MQCFIN [type: 3, strucLength: 16, parameter: 3 (MQIA_CURRENT_Q_DEPTH), value: 1] + MQCFIN [type: 3, strucLength: 16, parameter: 1227 (MQIACF_OLDEST_MSG_AGE), value: -1] + MQCFIL [type: 5, strucLength: 24, parameter: 1226 (MQIACF_Q_TIME_INDICATOR), count: 2, values: {-1, -1}] + MQCFIN [type: 3, strucLength: 16, parameter: 1027 (MQIACF_UNCOMMITTED_MSGS), value: 0]" + */ + private static PCFMessage[] createPCFResponseForInquireQStatusCmd() { + PCFMessage response1 = new PCFMessage(2, CMQCFC.MQCMD_INQUIRE_Q_STATUS, 1, false); + response1.addParameter(CMQC.MQCA_Q_NAME, 
"AMQ.5AF1608820C7D76E"); + response1.addParameter(CMQCFC.MQIACF_Q_STATUS_TYPE, 1105); + response1.addParameter(CMQC.MQIA_CURRENT_Q_DEPTH, 12); + response1.addParameter(CMQCFC.MQIACF_OLDEST_MSG_AGE, -1); + response1.addParameter(CMQCFC.MQIACF_Q_TIME_INDICATOR, new int[] {-1, -1}); + response1.addParameter(CMQCFC.MQIACF_UNCOMMITTED_MSGS, 0); + + PCFMessage response2 = new PCFMessage(2, CMQCFC.MQCMD_INQUIRE_Q_STATUS, 2, false); + response2.addParameter(CMQC.MQCA_Q_NAME, "DEV.DEAD.LETTER.QUEUE"); + response2.addParameter(CMQCFC.MQIACF_Q_STATUS_TYPE, 1105); + response2.addParameter(CMQC.MQIA_CURRENT_Q_DEPTH, 0); + response2.addParameter(CMQCFC.MQIACF_OLDEST_MSG_AGE, -1); + response2.addParameter(CMQCFC.MQIACF_Q_TIME_INDICATOR, new int[] {-1, -1}); + response2.addParameter(CMQCFC.MQIACF_UNCOMMITTED_MSGS, 0); + + PCFMessage response3 = new PCFMessage(2, CMQCFC.MQCMD_INQUIRE_Q_STATUS, 1, false); + response3.addParameter(CMQC.MQCA_Q_NAME, "DEV.QUEUE.1"); + response3.addParameter(CMQCFC.MQIACF_Q_STATUS_TYPE, 1105); + response3.addParameter(CMQC.MQIA_CURRENT_Q_DEPTH, 1); + response3.addParameter(CMQCFC.MQIACF_OLDEST_MSG_AGE, -1); + response3.addParameter(CMQCFC.MQIACF_Q_TIME_INDICATOR, new int[] {-1, -1}); + response3.addParameter(CMQCFC.MQIACF_UNCOMMITTED_MSGS, 10); + + return new PCFMessage[] {response1, response2, response3}; + } + + /* + PCFMessage: + MQCFH [type: 1, strucLength: 36, version: 1, command: 13 (MQCMD_INQUIRE_Q), msgSeqNumber: 1, control: 1, compCode: 0, reason: 0, parameterCount: 3] + MQCFST [type: 4, strucLength: 24, parameter: 2016 (MQCA_Q_NAME), codedCharSetId: 0, stringLength: 1, string: *] + MQCFIN [type: 3, strucLength: 16, parameter: 20 (MQIA_Q_TYPE), value: 1001] + MQCFIL [type: 5, strucLength: 36, parameter: 1002 (MQIACF_Q_ATTRS), count: 5, values: {2016, 15, 3, 17, 18}] + */ + private static PCFMessage createPCFRequestForInquireQCmd() { + PCFMessage request = new PCFMessage(CMQCFC.MQCMD_INQUIRE_Q); + request.addParameter(CMQC.MQCA_Q_NAME, "*"); + 
request.addParameter(CMQC.MQIA_Q_TYPE, CMQC.MQQT_ALL); + request.addParameter(CMQCFC.MQIACF_Q_ATTRS, new int[] {2016, 15, 3, 17, 18}); + return request; + } + + /* + 0 = {PCFMessage@6059} "PCFMessage: + MQCFH [type: 2, strucLength: 36, version: 1, command: 13 (MQCMD_INQUIRE_Q), msgSeqNumber: 1, control: 0, compCode: 0, reason: 0, parameterCount: 6] + MQCFST [type: 4, strucLength: 68, parameter: 2016 (MQCA_Q_NAME), codedCharSetId: 819, stringLength: 48, string: AMQ.5AF1608820C76D80 ] + MQCFIN [type: 3, strucLength: 16, parameter: 20 (MQIA_Q_TYPE), value: 1] + MQCFIN [type: 3, strucLength: 16, parameter: 3 (MQIA_CURRENT_Q_DEPTH), value: 0] + MQCFIN [type: 3, strucLength: 16, parameter: 17 (MQIA_OPEN_INPUT_COUNT), value: 1] + MQCFIN [type: 3, strucLength: 16, parameter: 15 (MQIA_MAX_Q_DEPTH), value: 5000] + MQCFIN [type: 3, strucLength: 16, parameter: 18 (MQIA_OPEN_OUTPUT_COUNT), value: 1]" + + 1 = {PCFMessage@6060} "PCFMessage: + MQCFH [type: 2, strucLength: 36, version: 1, command: 13 (MQCMD_INQUIRE_Q), msgSeqNumber: 2, control: 0, compCode: 0, reason: 0, parameterCount: 6] + MQCFST [type: 4, strucLength: 68, parameter: 2016 (MQCA_Q_NAME), codedCharSetId: 819, stringLength: 48, string: DEV.DEAD.LETTER.QUEUE ] + MQCFIN [type: 3, strucLength: 16, parameter: 20 (MQIA_Q_TYPE), value: 1] + MQCFIN [type: 3, strucLength: 16, parameter: 3 (MQIA_CURRENT_Q_DEPTH), value: 0] + MQCFIN [type: 3, strucLength: 16, parameter: 17 (MQIA_OPEN_INPUT_COUNT), value: 0] + MQCFIN [type: 3, strucLength: 16, parameter: 15 (MQIA_MAX_Q_DEPTH), value: 5000] + MQCFIN [type: 3, strucLength: 16, parameter: 18 (MQIA_OPEN_OUTPUT_COUNT), value: 0]" + + 2 = {PCFMessage@6061} "PCFMessage: + MQCFH [type: 2, strucLength: 36, version: 1, command: 13 (MQCMD_INQUIRE_Q), msgSeqNumber: 3, control: 0, compCode: 0, reason: 0, parameterCount: 6] + MQCFST [type: 4, strucLength: 68, parameter: 2016 (MQCA_Q_NAME), codedCharSetId: 819, stringLength: 48, string: DEV.QUEUE.1 ] + MQCFIN [type: 3, strucLength: 16, 
parameter: 20 (MQIA_Q_TYPE), value: 1] + MQCFIN [type: 3, strucLength: 16, parameter: 3 (MQIA_CURRENT_Q_DEPTH), value: 0] + MQCFIN [type: 3, strucLength: 16, parameter: 17 (MQIA_OPEN_INPUT_COUNT), value: 0] + MQCFIN [type: 3, strucLength: 16, parameter: 15 (MQIA_MAX_Q_DEPTH), value: 5000] + MQCFIN [type: 3, strucLength: 16, parameter: 18 (MQIA_OPEN_OUTPUT_COUNT), value: 0]" + */ + + private static PCFMessage[] createPCFResponseForInquireQCmd() { + PCFMessage response1 = new PCFMessage(2, CMQCFC.MQCMD_INQUIRE_Q, 1, false); + response1.addParameter(CMQC.MQCA_Q_NAME, "AMQ.5AF1608820C76D80"); + response1.addParameter(CMQC.MQIA_Q_TYPE, 1); + response1.addParameter(CMQC.MQIA_CURRENT_Q_DEPTH, 1); + response1.addParameter(CMQC.MQIA_OPEN_INPUT_COUNT, 1); + response1.addParameter(CMQC.MQIA_MAX_Q_DEPTH, 5000); + response1.addParameter(CMQC.MQIA_OPEN_OUTPUT_COUNT, 1); + response1.addParameter(CMQC.MQIA_USAGE, CMQC.MQUS_NORMAL); + + PCFMessage response2 = new PCFMessage(2, CMQCFC.MQCMD_INQUIRE_Q, 2, false); + response2.addParameter(CMQC.MQCA_Q_NAME, "DEV.DEAD.LETTER.QUEUE"); + response2.addParameter(CMQC.MQIA_Q_TYPE, 1); + response2.addParameter(CMQC.MQIA_CURRENT_Q_DEPTH, 2); + response2.addParameter(CMQC.MQIA_OPEN_INPUT_COUNT, 2); + response2.addParameter(CMQC.MQIA_MAX_Q_DEPTH, 5000); + response2.addParameter(CMQC.MQIA_OPEN_OUTPUT_COUNT, 2); + response2.addParameter(CMQC.MQIA_USAGE, CMQC.MQUS_TRANSMISSION); + + PCFMessage response3 = new PCFMessage(2, CMQCFC.MQCMD_INQUIRE_Q, 3, false); + response3.addParameter(CMQC.MQCA_Q_NAME, "DEV.QUEUE.1"); + response3.addParameter(CMQC.MQIA_Q_TYPE, 1); + response3.addParameter(CMQC.MQIA_CURRENT_Q_DEPTH, 3); + response3.addParameter(CMQC.MQIA_OPEN_INPUT_COUNT, 3); + response3.addParameter(CMQC.MQIA_MAX_Q_DEPTH, 5000); + response3.addParameter(CMQC.MQIA_OPEN_OUTPUT_COUNT, 3); + response3.addParameter(CMQC.MQIA_USAGE, CMQC.MQUS_TRANSMISSION); + + return new PCFMessage[] {response1, response2, response3}; + } + + /* + PCFMessage: + MQCFH 
[type: 1, strucLength: 36, version: 1, command: 17 (MQCMD_RESET_Q_STATS), msgSeqNumber: 1, control: 1, compCode: 0, reason: 0, parameterCount: 1] + MQCFST [type: 4, strucLength: 24, parameter: 2016 (MQCA_Q_NAME), codedCharSetId: 0, stringLength: 1, string: *] + */ + private static PCFMessage createPCFRequestForResetQStatsCmd() { + PCFMessage request = new PCFMessage(CMQCFC.MQCMD_RESET_Q_STATS); + request.addParameter(CMQC.MQCA_Q_NAME, "*"); + return request; + } + + /* + 0 = {PCFMessage@6144} "PCFMessage: + MQCFH [type: 2, strucLength: 36, version: 1, command: 17 (MQCMD_RESET_Q_STATS), msgSeqNumber: 1, control: 0, compCode: 0, reason: 0, parameterCount: 5] + MQCFST [type: 4, strucLength: 68, parameter: 2016 (MQCA_Q_NAME), codedCharSetId: 819, stringLength: 48, string: DEV.DEAD.LETTER.QUEUE ] + MQCFIN [type: 3, strucLength: 16, parameter: 37 (MQIA_MSG_ENQ_COUNT), value: 0] + MQCFIN [type: 3, strucLength: 16, parameter: 38 (MQIA_MSG_DEQ_COUNT), value: 0] + MQCFIN [type: 3, strucLength: 16, parameter: 36 (MQIA_HIGH_Q_DEPTH), value: 0] + MQCFIN [type: 3, strucLength: 16, parameter: 35 (MQIA_TIME_SINCE_RESET), value: 65]" + */ + private static PCFMessage[] createPCFResponseForResetQStatsCmd() { + PCFMessage response1 = new PCFMessage(2, CMQCFC.MQCMD_RESET_Q_STATS, 1, false); + response1.addParameter(CMQC.MQCA_Q_NAME, "DEV.DEAD.LETTER.QUEUE"); + response1.addParameter(CMQC.MQIA_MSG_ENQ_COUNT, 3); + response1.addParameter(CMQC.MQIA_MSG_DEQ_COUNT, 0); + response1.addParameter(CMQC.MQIA_HIGH_Q_DEPTH, 10); + response1.addParameter(CMQC.MQIA_TIME_SINCE_RESET, 65); + + return new PCFMessage[] {response1}; + } +} diff --git a/ibm-mq-metrics/src/test/java/io/opentelemetry/ibm/mq/metricscollector/QueueManagerMetricsCollectorTest.java b/ibm-mq-metrics/src/test/java/io/opentelemetry/ibm/mq/metricscollector/QueueManagerMetricsCollectorTest.java new file mode 100644 index 000000000..3c6ef1503 --- /dev/null +++ 
b/ibm-mq-metrics/src/test/java/io/opentelemetry/ibm/mq/metricscollector/QueueManagerMetricsCollectorTest.java @@ -0,0 +1,121 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.metricscollector; + +import static java.util.Collections.singletonList; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.ibm.mq.constants.CMQC; +import com.ibm.mq.constants.CMQCFC; +import com.ibm.mq.headers.pcf.PCFMessage; +import com.ibm.mq.headers.pcf.PCFMessageAgent; +import io.opentelemetry.ibm.mq.config.QueueManager; +import io.opentelemetry.ibm.mq.metrics.MetricsConfig; +import io.opentelemetry.ibm.mq.opentelemetry.ConfigWrapper; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.testing.junit5.OpenTelemetryExtension; +import java.util.ArrayList; +import java.util.List; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.extension.RegisterExtension; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +class QueueManagerMetricsCollectorTest { + + @RegisterExtension + static final OpenTelemetryExtension otelTesting = OpenTelemetryExtension.create(); + + QueueManagerMetricsCollector classUnderTest; + QueueManager queueManager; + MetricsCollectorContext context; + @Mock PCFMessageAgent pcfMessageAgent; + + @BeforeEach + public void setup() throws Exception { + + ConfigWrapper config = ConfigWrapper.parse("src/test/resources/conf/config.yml"); + ObjectMapper mapper = new ObjectMapper(); + queueManager = mapper.convertValue(config.getQueueManagers().get(0), QueueManager.class); + context = + new MetricsCollectorContext(queueManager, pcfMessageAgent, 
null, new MetricsConfig(config)); + } + + @Test + void testProcessPCFRequestAndPublishQMetricsForInquireQStatusCmd() throws Exception { + when(pcfMessageAgent.send(any(PCFMessage.class))) + .thenReturn(createPCFResponseForInquireQMgrStatusCmd()); + classUnderTest = + new QueueManagerMetricsCollector( + otelTesting.getOpenTelemetry().getMeter("opentelemetry.io/mq")); + classUnderTest.accept(context); + List metricsList = new ArrayList<>(singletonList("ibm.mq.manager.status")); + + for (MetricData metric : otelTesting.getMetrics()) { + if (metricsList.remove(metric.getName())) { + assertThat(metric.getLongGaugeData().getPoints().iterator().next().getValue()).isEqualTo(2); + } + } + assertThat(metricsList).isEmpty(); + } + + /* Request + PCFMessage: + MQCFH [type: 1, strucLength: 36, version: 1, command: 161 (MQCMD_INQUIRE_Q_MGR_STATUS), msgSeqNumber: 1, control: 1, compCode: 0, reason: 0, parameterCount: 1] + MQCFIL [type: 5, strucLength: 20, parameter: 1229 (MQIACF_Q_MGR_STATUS_ATTRS), count: 1, values: {1009}] + + Response + PCFMessage: + MQCFH [type: 2, strucLength: 36, version: 1, command: 161 (MQCMD_INQUIRE_Q_MGR_STATUS), msgSeqNumber: 1, control: 1, compCode: 0, reason: 0, parameterCount: 23] + MQCFST [type: 4, strucLength: 68, parameter: 2015 (MQCA_Q_MGR_NAME), codedCharSetId: 819, stringLength: 48, string: QM1 ] + MQCFIN [type: 3, strucLength: 16, parameter: 1149 (MQIACF_Q_MGR_STATUS), value: 2] + MQCFST [type: 4, strucLength: 20, parameter: 3208 (null), codedCharSetId: 819, stringLength: 0, string: ] + MQCFIN [type: 3, strucLength: 16, parameter: 1416 (null), value: 0] + MQCFIN [type: 3, strucLength: 16, parameter: 1232 (MQIACF_CHINIT_STATUS), value: 2] + MQCFIN [type: 3, strucLength: 16, parameter: 1233 (MQIACF_CMD_SERVER_STATUS), value: 2] + MQCFIN [type: 3, strucLength: 16, parameter: 1230 (MQIACF_CONNECTION_COUNT), value: 23] + MQCFST [type: 4, strucLength: 20, parameter: 3071 (MQCACF_CURRENT_LOG_EXTENT_NAME), codedCharSetId: 819, stringLength: 0, 
string: ] + MQCFST [type: 4, strucLength: 20, parameter: 2115 (null), codedCharSetId: 819, stringLength: 0, string: ] + MQCFST [type: 4, strucLength: 36, parameter: 2116 (null), codedCharSetId: 819, stringLength: 13, string: Installation1] + MQCFST [type: 4, strucLength: 28, parameter: 2117 (null), codedCharSetId: 819, stringLength: 8, string: /opt/mqm] + MQCFIN [type: 3, strucLength: 16, parameter: 1409 (null), value: 0] + MQCFIN [type: 3, strucLength: 16, parameter: 1420 (null), value: 9] + MQCFST [type: 4, strucLength: 44, parameter: 3074 (MQCACF_LOG_PATH), codedCharSetId: 819, stringLength: 24, string: /var/mqm/log/QM1/active/] + MQCFIN [type: 3, strucLength: 16, parameter: 1421 (null), value: 9] + MQCFST [type: 4, strucLength: 20, parameter: 3073 (MQCACF_MEDIA_LOG_EXTENT_NAME), codedCharSetId: 819, stringLength: 0, string: ] + MQCFIN [type: 3, strucLength: 16, parameter: 1417 (null), value: 0] + MQCFST [type: 4, strucLength: 20, parameter: 3072 (MQCACF_RESTART_LOG_EXTENT_NAME), codedCharSetId: 819, stringLength: 0, string: ] + MQCFIN [type: 3, strucLength: 16, parameter: 1418 (null), value: 1] + MQCFIN [type: 3, strucLength: 16, parameter: 1419 (null), value: 0] + MQCFIN [type: 3, strucLength: 16, parameter: 1325 (null), value: 0] + MQCFST [type: 4, strucLength: 32, parameter: 3175 (null), codedCharSetId: 819, stringLength: 12, string: 2018-05-08 ] + MQCFST [type: 4, strucLength: 28, parameter: 3176 (null), codedCharSetId: 819, stringLength: 8, string: 08.32.08] + */ + + private static PCFMessage[] createPCFResponseForInquireQMgrStatusCmd() { + PCFMessage response1 = new PCFMessage(2, CMQCFC.MQCMD_INQUIRE_Q_MGR_STATUS, 1, true); + response1.addParameter(CMQC.MQCA_Q_MGR_NAME, "QM1"); + response1.addParameter(CMQCFC.MQIACF_Q_MGR_STATUS, 2); + response1.addParameter(CMQCFC.MQIACF_CHINIT_STATUS, 2); + response1.addParameter(CMQCFC.MQIACF_CMD_SERVER_STATUS, 2); + response1.addParameter(CMQCFC.MQIACF_CONNECTION_COUNT, 23); + 
response1.addParameter(CMQCFC.MQCACF_CURRENT_LOG_EXTENT_NAME, ""); + response1.addParameter(CMQCFC.MQCACF_LOG_PATH, "/var/mqm/log/QM1/active/"); + response1.addParameter(CMQCFC.MQCACF_MEDIA_LOG_EXTENT_NAME, ""); + response1.addParameter(CMQCFC.MQCACF_RESTART_LOG_EXTENT_NAME, ""); + response1.addParameter(CMQCFC.MQIACF_RESTART_LOG_SIZE, 42); + response1.addParameter(CMQCFC.MQIACF_REUSABLE_LOG_SIZE, 42); + response1.addParameter(CMQCFC.MQIACF_ARCHIVE_LOG_SIZE, 42); + + return new PCFMessage[] {response1}; + } +} diff --git a/ibm-mq-metrics/src/test/java/io/opentelemetry/ibm/mq/metricscollector/TopicMetricsCollectorTest.java b/ibm-mq-metrics/src/test/java/io/opentelemetry/ibm/mq/metricscollector/TopicMetricsCollectorTest.java new file mode 100644 index 000000000..646d4ee98 --- /dev/null +++ b/ibm-mq-metrics/src/test/java/io/opentelemetry/ibm/mq/metricscollector/TopicMetricsCollectorTest.java @@ -0,0 +1,113 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.metricscollector; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.ibm.mq.constants.CMQC; +import com.ibm.mq.constants.CMQCFC; +import com.ibm.mq.headers.pcf.PCFMessage; +import com.ibm.mq.headers.pcf.PCFMessageAgent; +import io.opentelemetry.ibm.mq.config.QueueManager; +import io.opentelemetry.ibm.mq.metrics.MetricsConfig; +import io.opentelemetry.ibm.mq.opentelemetry.ConfigWrapper; +import io.opentelemetry.sdk.metrics.data.LongPointData; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.testing.junit5.OpenTelemetryExtension; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; +import org.junit.jupiter.api.BeforeEach; +import 
org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.extension.RegisterExtension; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +class TopicMetricsCollectorTest { + @RegisterExtension + static final OpenTelemetryExtension otelTesting = OpenTelemetryExtension.create(); + + TopicMetricsCollector classUnderTest; + QueueManager queueManager; + ConfigWrapper config; + @Mock private PCFMessageAgent pcfMessageAgent; + + @BeforeEach + void setup() throws Exception { + config = ConfigWrapper.parse("src/test/resources/conf/config.yml"); + ObjectMapper mapper = new ObjectMapper(); + queueManager = mapper.convertValue(config.getQueueManagers().get(0), QueueManager.class); + } + + @Test + void testPublishMetrics() throws Exception { + MetricsCollectorContext context = + new MetricsCollectorContext(queueManager, pcfMessageAgent, null, new MetricsConfig(config)); + classUnderTest = + new TopicMetricsCollector(otelTesting.getOpenTelemetry().getMeter("opentelemetry.io/mq")); + + when(pcfMessageAgent.send(any(PCFMessage.class))) + .thenReturn(createPCFResponseForInquireTopicStatusCmd()); + + classUnderTest.accept(context); + + List metricsList = + new ArrayList<>(Arrays.asList("ibm.mq.publish.count", "ibm.mq.subscription.count")); + + for (MetricData metric : otelTesting.getMetrics()) { + if (metricsList.remove(metric.getName())) { + if (metric.getName().equals("ibm.mq.publish.count")) { + Set values = new HashSet<>(); + values.add(2L); + values.add(3L); + assertThat( + metric.getLongGaugeData().getPoints().stream() + .map(LongPointData::getValue) + .collect(Collectors.toSet())) + .isEqualTo(values); + } + if (metric.getName().equals("ibm.mq.subscription.count")) { + Set values = new HashSet<>(); + values.add(3L); + values.add(4L); + assertThat( + metric.getLongGaugeData().getPoints().stream() + .map(LongPointData::getValue) + .collect(Collectors.toSet())) 
+ .isEqualTo(values); + } + } + } + assertThat(metricsList).isEmpty(); + } + + private static PCFMessage[] createPCFResponseForInquireTopicStatusCmd() { + PCFMessage response1 = new PCFMessage(2, CMQCFC.MQCMD_INQUIRE_TOPIC_STATUS, 1, false); + response1.addParameter(CMQC.MQCA_TOPIC_STRING, "test"); + response1.addParameter(CMQC.MQIA_PUB_COUNT, 2); + response1.addParameter(CMQC.MQIA_SUB_COUNT, 3); + + PCFMessage response2 = new PCFMessage(2, CMQCFC.MQCMD_INQUIRE_TOPIC_STATUS, 2, false); + response2.addParameter(CMQC.MQCA_TOPIC_STRING, "dev"); + response2.addParameter(CMQC.MQIA_PUB_COUNT, 3); + response2.addParameter(CMQC.MQIA_SUB_COUNT, 4); + + PCFMessage response3 = new PCFMessage(2, CMQCFC.MQCMD_INQUIRE_TOPIC_STATUS, 3, false); + response3.addParameter(CMQC.MQCA_TOPIC_STRING, "system"); + response3.addParameter(CMQC.MQIA_PUB_COUNT, 5); + response3.addParameter(CMQC.MQIA_SUB_COUNT, 6); + + return new PCFMessage[] {response1, response2, response3}; + } +} diff --git a/ibm-mq-metrics/src/test/java/io/opentelemetry/ibm/mq/opentelemetry/ConfigTest.java b/ibm-mq-metrics/src/test/java/io/opentelemetry/ibm/mq/opentelemetry/ConfigTest.java new file mode 100644 index 000000000..73aadc6a1 --- /dev/null +++ b/ibm-mq-metrics/src/test/java/io/opentelemetry/ibm/mq/opentelemetry/ConfigTest.java @@ -0,0 +1,54 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.opentelemetry; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.google.common.collect.ImmutableMap; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +class ConfigTest { + + private Properties systemProperties; + + @BeforeEach + public void cacheSystemProperties() { + systemProperties = new Properties(); + for (Map.Entry entry : System.getProperties().entrySet()) { + 
systemProperties.put(entry.getKey().toString(), entry.getValue().toString()); + } + } + + @Test + void testSSLConnection() { + Config.setUpSslConnection( + new HashMap( + ImmutableMap.of( + "keyStorePath", "foo", + "trustStorePath", "bar", + "keyStorePassword", "password", + "trustStorePassword", "password1"))); + + assertThat(System.getProperties().get("javax.net.ssl.keyStore")).isEqualTo("foo"); + assertThat(System.getProperties().get("javax.net.ssl.trustStorePath")).isEqualTo("bar"); + assertThat(System.getProperties().get("javax.net.ssl.keyStorePassword")).isEqualTo("password"); + assertThat(System.getProperties().get("javax.net.ssl.trustStorePassword")) + .isEqualTo("password1"); + } + + @AfterEach + public void resetSystemProperties() { + System.getProperties().clear(); + for (Map.Entry entry : systemProperties.entrySet()) { + System.setProperty(entry.getKey().toString(), entry.getValue().toString()); + } + } +} diff --git a/ibm-mq-metrics/src/test/java/io/opentelemetry/ibm/mq/opentelemetry/ConfigWrapperTest.java b/ibm-mq-metrics/src/test/java/io/opentelemetry/ibm/mq/opentelemetry/ConfigWrapperTest.java new file mode 100644 index 000000000..3d2397cc0 --- /dev/null +++ b/ibm-mq-metrics/src/test/java/io/opentelemetry/ibm/mq/opentelemetry/ConfigWrapperTest.java @@ -0,0 +1,51 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.ibm.mq.opentelemetry; + +import static java.util.Collections.singletonList; +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; + +import java.time.Duration; +import java.time.temporal.ChronoUnit; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +class ConfigWrapperTest { + + String file; + + @BeforeEach + void setUp() { + file = ConfigWrapperTest.class.getResource("/conf/config.yml").getFile(); + // Windows resources can contain a colon, which can't be mapped to a Path cleanly + // They look like /D:/a/path/to/whatever + 
file = file.replaceFirst("^/([A-Z]:)/", "$1/"); + } + + @Test + void testQueueManagerNames() throws Exception { + ConfigWrapper config = ConfigWrapper.parse(file); + assertThat(config.getQueueManagerNames()).isEqualTo(singletonList("QM1")); + } + + @Test + void testNumberOfThreads() throws Exception { + ConfigWrapper config = ConfigWrapper.parse(file); + assertThat(config.getNumberOfThreads()).isEqualTo(20); + } + + @Test + void testTaskDelay() throws Exception { + ConfigWrapper config = ConfigWrapper.parse(file); + assertThat(config.getTaskDelay()).isEqualTo(Duration.of(27, ChronoUnit.SECONDS)); + } + + @Test + void testTaskInitialDelay() throws Exception { + ConfigWrapper config = ConfigWrapper.parse(file); + assertThat(config.getTaskInitialDelaySeconds()).isEqualTo(0); + } +} diff --git a/ibm-mq-metrics/src/test/resources/conf/config.yml b/ibm-mq-metrics/src/test/resources/conf/config.yml new file mode 100644 index 000000000..51a11d53f --- /dev/null +++ b/ibm-mq-metrics/src/test/resources/conf/config.yml @@ -0,0 +1,217 @@ +#This is the timeout on queue metrics and channel metrics threads. Default value is 20 seconds. +#No need to change the default unless you know what you are doing. +#queueMetricsCollectionTimeoutInSeconds: 40 +#channelMetricsCollectionTimeoutInSeconds: 40 +#topicMetricsCollectionTimeoutInSeconds: 40 + +queueManagers: + - name: "QM1" + host: "localhost" + port: 1414 + + #The transport type for the queue manager connection, the default is "Bindings" for a binding type connection + #For bindings type, connection WMQ extension (i.e. machine agent) needs to be on the same machine on which the WebSphere MQ server is running + #For client type, connection change it to "Client". + + #Channel name of the queue manager, channel should be server-conn type.
 + #This field is not required in case of transportType: Bindings + channelName: "DEV.ADMIN.SVRCONN" + + #for user access level, please check "Access Permissions" section on the extensions page + #comment out the username and password in case of transportType: Bindings. + username: "app" + password: "passw0rd" + + #PCF requests are always sent to SYSTEM.ADMIN.COMMAND.QUEUE. The PCF responses to these requests are sent to the default reply-to queue called + #SYSTEM.DEFAULT.MODEL.QUEUE. However, you can override this behavior and send it to a temporary dynamic queue by changing the modelQueueName and replyQueuePrefix fields. + #For more details around this https://www.ibm.com/support/knowledgecenter/SSFKSJ_7.5.0/com.ibm.mq.ref.adm.doc/q083240_.htm & https://www.ibm.com/support/knowledgecenter/SSFKSJ_8.0.0/com.ibm.mq.adm.doc/q020010_.htm + #modelQueueName: "" + #replyQueuePrefix: "" + + # Name of the temporary dynamic queue holding the configuration events. This queue contains information regarding the configuration of the queue manager, notably MaxChannels and MaxActiveChannels. + # If unset, the default queue name `SYSTEM.ADMIN.CONFIG.EVENT` is applied. + # Configuration events need to be enabled explicitly in the queue manager configuration. See https://www.ibm.com/docs/en/ibm-mq/9.4.x?topic=monitoring-configuration-events for reference. + #configurationQueueName: "SYSTEM.ADMIN.CONFIG.EVENT" + + # Interval in milliseconds at which the configuration events in the configuration queue can be consumed. + # By default, no events are consumed. + #consumeConfigurationEventInterval: 600000 # 10 minutes + + # Enable running a queue manager refresh request to reload its configuration and create a configuration event. + # This action is only executed if no configuration events are found when reading the configuration queue. + # By default, this action is disabled.
+ #refreshQueueManagerConfigurationEnabled: false + + #Sets the CCSID used in the message descriptor of request and response messages. The default value is MQC.MQCCSI_Q_MGR. + #To set this, please use the integer value. + #ccsid: + + #Sets the encoding used in the message descriptor of request and response messages. The default value is MQC.MQENC_NATIVE. + #To set this, please use the integer value. + #encoding: + + # IBM Cipher Suite e.g. "SSL_RSA_WITH_AES_128_CBC_SHA256".. + # For translation to IBM Cipher http://www.ibm.com/support/knowledgecenter/SSFKSJ_8.0.0/com.ibm.mq.dev.doc/q113210_.htm + # A cipher working for IBM Cloud MQ and Temurin JDK 8 is TLS_AES_128_GCM_SHA256 + #cipherSuite: "" + + + queueFilters: + #Can provide complete queue name or generic names. A generic name is a character string followed by an asterisk (*), + #for example ABC*, and it selects all objects having names that start with the selected character string. + #An asterisk on its own matches all possible names. + include: ["*"] + exclude: + #type value: STARTSWITH, EQUALS, ENDSWITH, CONTAINS + - type: "STARTSWITH" + #The name of the queue or queue name pattern as per queue filter, comma separated values + values: ["SYSTEM","AMQ"] + + + channelFilters: + #Can provide complete channel name or generic names. A generic name is a character string followed by an asterisk (*), + #for example ABC*, and it selects all objects having names that start with the selected character string. + #An asterisk on its own matches all possible names. + include: ["*"] + exclude: + #type value: STARTSWITH, EQUALS, ENDSWITH, CONTAINS + - type: "STARTSWITH" + #The name of the queue or queue name pattern as per queue filter, comma separated values + values: ["SYSTEM", "TEST"] + + listenerFilters: + #Can provide complete channel name or generic names. 
A generic name is a character string followed by an asterisk (*), + #for example ABC*, and it selects all objects having names that start with the selected character string. + #An asterisk on its own matches all possible names. + include: ["*"] + exclude: + #type value: STARTSWITH, EQUALS, ENDSWITH, CONTAINS + - type: "STARTSWITH" + #The name of the queue or queue name pattern as per queue filter, comma separated values + values: ["SYSTEM"] + + topicFilters: + # For topics, IBM MQ uses the topic wildcard characters ('#' and '+') and does not treat a trailing asterisk as a wildcard + # https://www.ibm.com/support/knowledgecenter/en/SSFKSJ_7.5.0/com.ibm.mq.pla.doc/q005020_.htm + include: ["#"] + exclude: + #type value: STARTSWITH, EQUALS, ENDSWITH, CONTAINS + - type: "EQUALS" + #The name of the queue or queue name pattern as per queue filter, comma separated values + values: ["system","$SYS"] + +metrics: + "ibm.mq.message.retry.count": # Number of message retries + enabled: true + "ibm.mq.status": # Channel status + enabled: true + "ibm.mq.max.sharing.conversations": # Maximum number of conversations permitted on this channel instance. + enabled: true + "ibm.mq.current.sharing.conversations": # Current number of conversations permitted on this channel instance. 
+ enabled: true + "ibm.mq.byte.received": # Number of bytes received + enabled: true + "ibm.mq.byte.sent": # Number of bytes sent + enabled: true + "ibm.mq.buffers.received": # Buffers received + enabled: true + "ibm.mq.buffers.sent": # Buffers sent + enabled: true + "ibm.mq.message.count": # Message count + enabled: true + "ibm.mq.open.input.count": # Count of applications sending messages to the queue + enabled: true + "ibm.mq.open.output.count": # Count of applications consuming messages from the queue + enabled: true + "ibm.mq.high.queue.depth": # The current high queue depth + enabled: true + "ibm.mq.service.interval": # The queue service interval + enabled: true + "ibm.mq.queue.depth.full.event": # The number of full queue events + enabled: true + "ibm.mq.queue.depth.high.event": # The number of high queue events + enabled: true + "ibm.mq.queue.depth.low.event": # The number of low queue events + enabled: true + "ibm.mq.uncommitted.messages": # Number of uncommitted messages + enabled: true + "ibm.mq.oldest.msg.age": # Queue message oldest age + enabled: true + "ibm.mq.current.max.queue.filesize": # Current maximum queue file size + enabled: true + "ibm.mq.current.queue.filesize": # Current queue file size + enabled: true + "ibm.mq.instances.per.client": # Instances per client + enabled: true + "ibm.mq.message.deq.count": # Message dequeue count + enabled: true + "ibm.mq.message.enq.count": # Message enqueue count + enabled: true + "ibm.mq.queue.depth": # Current queue depth + enabled: true + "ibm.mq.service.interval.event": # Queue service interval event + enabled: true + "ibm.mq.reusable.log.size": # The amount of space occupied, in megabytes, by log extents available to be reused. + enabled: true + "ibm.mq.manager.active.channels": # The queue manager active maximum channels limit + enabled: true + "ibm.mq.restart.log.size": # Size of the log data required for restart recovery in megabytes. 
+ enabled: true + "ibm.mq.max.queue.depth": # Maximum queue depth + enabled: true + "ibm.mq.onqtime.short_period": # Amount of time, in microseconds, that a message spent on the queue, over a short period + enabled: true + "ibm.mq.onqtime.long_period": # Amount of time, in microseconds, that a message spent on the queue, over a longer period + enabled: true + "ibm.mq.message.received.count": # Number of messages received + enabled: true + "ibm.mq.message.sent.count": # Number of messages sent + enabled: true + "ibm.mq.max.instances": # Max channel instances + enabled: true + "ibm.mq.connection.count": # Active connections count + enabled: true + "ibm.mq.manager.status": # Queue manager status + enabled: true + "ibm.mq.heartbeat": # Queue manager heartbeat + enabled: true + "ibm.mq.archive.log.size": # Queue manager archive log size + enabled: true + "ibm.mq.manager.max.active.channels": # Queue manager max active channels + enabled: true + "ibm.mq.manager.statistics.interval": # Queue manager statistics interval + enabled: true + "ibm.mq.publish.count": # Topic publication count + enabled: true + "ibm.mq.subscription.count": # Topic subscription count + enabled: true + "ibm.mq.listener.status": # Listener status + enabled: true + "ibm.mq.unauthorized.event": # Number of authentication error events + enabled: true + "ibm.mq.manager.max.handles": # Max open handles + enabled: true + +#Run it as a scheduled task instead of running every minute. 
+#If you want to run this every minute, comment this out +taskSchedule: + numberOfThreads: 1 + taskDelaySeconds: 27 + + +sslConnection: + trustStorePath: "" + trustStorePassword: "" + + keyStorePath: "" + keyStorePassword: "" + + +# Configure the OTLP exporter using system properties keys following the specification https://opentelemetry.io/docs/languages/java/configuration/ +otlpExporter: + otel.exporter.otlp.endpoint: https://localhost:4318 + otel.exporter.otlp.protocol: http/protobuf + otel.metric.export.interval: 5s + otel.logs.exporter: none + otel.traces.exporter: none diff --git a/ibm-mq-metrics/templates/registry/java/IbmMqAttributes.java.j2 b/ibm-mq-metrics/templates/registry/java/IbmMqAttributes.java.j2 new file mode 100644 index 000000000..2fcbdd64b --- /dev/null +++ b/ibm-mq-metrics/templates/registry/java/IbmMqAttributes.java.j2 @@ -0,0 +1,27 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ +package io.opentelemetry.ibm.mq.metrics; + +import static io.opentelemetry.api.common.AttributeKey.stringKey; +import static io.opentelemetry.api.common.AttributeKey.longKey; +import io.opentelemetry.api.common.AttributeKey; + +// This file is generated using weaver. Do not edit manually. + +/** Attribute definitions generated from a Weaver model. Do not edit manually. 
*/ +public final class IbmMqAttributes { +{% for attr in ctx %} + /** + {{ attr.brief }} */{% if attr.type == 'string' %} + public final static AttributeKey {{ attr.name.upper().split('.')|join('_') }} = stringKey("{{attr.name}}"); + {% elif attr.type == 'int' %} + public final static AttributeKey {{ attr.name.upper().split('.')|join('_') }} = longKey("{{attr.name}}"); + {% else %} + // UNHANDLED TYPE PLEASE FIXME + public final static AttributeKey {{ attr.name.upper().split('.')|join('_') }} = ??key("{{attr.name}}"); + {% endif %} +{% endfor %} + private IbmMqAttributes(){} +} diff --git a/ibm-mq-metrics/templates/registry/java/Metrics.java.j2 b/ibm-mq-metrics/templates/registry/java/Metrics.java.j2 new file mode 100644 index 000000000..1b0097826 --- /dev/null +++ b/ibm-mq-metrics/templates/registry/java/Metrics.java.j2 @@ -0,0 +1,40 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ +package io.opentelemetry.ibm.mq.metrics; + +import io.opentelemetry.api.metrics.LongCounter; +import io.opentelemetry.api.metrics.LongGauge; +import io.opentelemetry.api.metrics.Meter; +import java.util.function.Function; + +// This file is generated using weaver. Do not edit manually. + +/** Metric definitions generated from a Weaver model. Do not edit manually. 
*/ +public final class Metrics { +public final static Function MIBY_TO_BYTES = x -> x * 1024L * 1024L; +private Metrics(){ +} +{% for metric in ctx %} + + {% if metric.instrument == "gauge" %} + public static LongGauge create{{ metric.metric_name.replace("_", ".")|split('.')|map('capitalize')|join }}(Meter meter) { + return meter + .gaugeBuilder("{{ metric.metric_name }}") + .ofLongs() + .setUnit("{{ metric.unit }}") + .setDescription("{{ metric.brief }}") + .build(); + } + {% elif metric.instrument == "counter" %} + public static LongCounter create{{ metric.metric_name.replace("_", ".")|split('.')|map('capitalize')|join }}(Meter meter) { + return meter + .counterBuilder("{{ metric.metric_name }}") + .setUnit("{{ metric.unit }}") + .setDescription("{{ metric.brief }}") + .build(); + } + {% endif %} +{% endfor %} +} diff --git a/ibm-mq-metrics/templates/registry/java/MetricsConfig.java.j2 b/ibm-mq-metrics/templates/registry/java/MetricsConfig.java.j2 new file mode 100644 index 000000000..df6fae562 --- /dev/null +++ b/ibm-mq-metrics/templates/registry/java/MetricsConfig.java.j2 @@ -0,0 +1,36 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ +package io.opentelemetry.ibm.mq.metrics; + +import io.opentelemetry.ibm.mq.opentelemetry.ConfigWrapper; +import java.util.Map; + +// This file is generated using weaver. Do not edit manually. + +/** Configuration of metrics as defined in config.yml. 
*/ +public final class MetricsConfig { + + private final Map config; + + public MetricsConfig(ConfigWrapper config) { + this.config = config.getMetrics(); + } +{% for metric in ctx %} + public boolean is{{ metric.metric_name.replace("_", ".")|split('.')|map('capitalize')|join }}Enabled() { + return isEnabled("{{ metric.metric_name }}"); + } +{% endfor %} + private boolean isEnabled(String key) { + Object metricInfo = config.get(key); + if (!(metricInfo instanceof Map)) { + return false; + } + Object enabled = ((Map) metricInfo).get("enabled"); + if (enabled instanceof Boolean) { + return (Boolean) enabled; + } + return false; + } +} diff --git a/ibm-mq-metrics/templates/registry/java/weaver.yaml b/ibm-mq-metrics/templates/registry/java/weaver.yaml new file mode 100644 index 000000000..ece71c233 --- /dev/null +++ b/ibm-mq-metrics/templates/registry/java/weaver.yaml @@ -0,0 +1,10 @@ +templates: + - template: Metrics.java.j2 + filter: '.groups | map(select(.type == "metric"))' + application_mode: single + - template: MetricsConfig.java.j2 + filter: '.groups | map(select(.type == "metric"))' + application_mode: single + - template: IbmMqAttributes.java.j2 + filter: '.groups | map(select(.type == "attribute_group")) | map(.attributes[])' + application_mode: single diff --git a/ibm-mq-metrics/templates/registry/markdown/attribute_macros.j2 b/ibm-mq-metrics/templates/registry/markdown/attribute_macros.j2 new file mode 100644 index 000000000..9c0fea34e --- /dev/null +++ b/ibm-mq-metrics/templates/registry/markdown/attribute_macros.j2 @@ -0,0 +1,36 @@ +{% import 'examples_macros.j2' as examples %} +{% macro type(attribute) %}{%- if attribute.type is mapping %} +{%- if attribute.type.members[0].value is string %}string{%- endif %} +{%- if attribute.type.members[0].value is int %}int{%- endif %} +{%- if attribute.type.members[0].value is float %}double{%- endif %} +{%- elif attribute.type == "template[boolean]" %}boolean +{%- elif attribute.type == "template[int]" %}int +{%- 
elif attribute.type == "template[double]" %}double +{%- elif attribute.type == "template[string]" %}string +{%- elif attribute.type == "template[boolean[]]" %}boolean[] +{%- elif attribute.type == "template[int[]]" %}int[] +{%- elif attribute.type == "template[double[]]" %}double[] +{%- elif attribute.type == "template[string[]]" %}string[] +{%- else %}{{ attribute.type | trim }}{%- endif %}{% endmacro %} + +{% macro name(attribute) %}{%- if attribute.type is startingwith("template[") %}`{{ attribute.name }}.` +{%- else %}`{{ attribute.name }}`{%- endif %}{% endmacro %} + +{% macro find_lineage(attr_id, lineage) %}{% if attr_id in lineage %}{{lineage[attr_id].source_group}}{% endif %}{% endmacro %} + +{% macro name_with_link(attribute, attribute_registry_base_url, lineage_attributes) %}[{{name(attribute)}}]({{attribute_registry_base_url}}/{{ find_lineage(attribute.name, lineage_attributes) | split_id | list | reject("eq", "registry")| first | kebab_case }}.md){% endmacro %} + +{% macro display_name(group) %} +{%- if 'display_name' in group %}{{ group.display_name }} +{%- else %}{{ group.id | split_id | list | reject("eq", "registry") | join(" ") | title_case | acronym }} Attributes +{%- endif %}{% endmacro %} + +{% macro heading_link_fragments(title) %}{{ title | trim | lower | replace(" ", "-") | replace("(", "") | replace(")", "") | replace("/", "") | replace("\\", "") | replace(".", "") | replace("!", "") | replace("?", "") | replace("~", "") | replace("#", "")}}{% endmacro %} + +{% macro humanize(text) %} + {{- text.replace('_', ' ') -}} +{% endmacro %} + +{% macro sentence_case(text) %} + {{- text[:1].upper() + text[1:].lower() -}} +{% endmacro %} diff --git a/ibm-mq-metrics/templates/registry/markdown/attribute_namespace.md.j2 b/ibm-mq-metrics/templates/registry/markdown/attribute_namespace.md.j2 new file mode 100644 index 000000000..e55c288b3 --- /dev/null +++ b/ibm-mq-metrics/templates/registry/markdown/attribute_namespace.md.j2 @@ -0,0 +1,51 @@ +{#- This 
template is rendered per top-level registry namespace. -#} +{#- It consists of two variables: -#} +{#- - id: The top-level namespace id. -#} +{#- - groups: A sequence of all attribute groups under this namespace. -#} +{#- This includes deprecated groups. -#} +{%- import 'stability.j2' as stability -%} +{%- import 'notes.j2' as notes -%} +{%- import 'enum_macros.j2' as enums -%} +{%- import 'attribute_macros.j2' as attrs -%} +{%- import 'examples_macros.j2' as examples -%} +{%- set my_file_name = ctx.id | lower | kebab_case ~ ".md" -%} +{{- template.set_file_name(my_file_name) -}} +{%- set groups = namespace(deprecated=[], non_deprecated=[]) -%} +{%- for group in ctx.groups | sort(attribute="id") -%} +{%- if group.id[-10:] == "deprecated" -%} +{%- set groups.deprecated = groups.deprecated + [group] -%} +{%- else -%} +{%- set groups.non_deprecated = groups.non_deprecated + [group] -%} +{%- endif -%} +{%- endfor -%} +{%- set attr_groups = groups.non_deprecated + groups.deprecated -%} + + + + +# {{ attrs.humanize(attrs.sentence_case(ctx.id)) | acronym }} + +{%- if attr_groups | length > 1 %} +{% for group in attr_groups %} +- [{{ attrs.display_name(group) }}](#{{ attrs.heading_link_fragments(attrs.display_name(group)) }}) +{%- endfor -%} +{%- endif %} +{% for group in attr_groups %} +## {{ attrs.display_name(group) }} + +{% if group.brief.endswith("\n") -%} +{{ group.brief }} +{% else -%} +{{ group.brief }} +{{"\n"}} +{%- endif -%} +| Attribute | Type | Description | Examples | Stability | +|---|---|---|---|---| +{%- for attribute in group.attributes | sort(attribute="name") %}{% set attr_anchor = attribute.name | kebab_case %} +| {{ attrs.name(attribute) }} | {{ attrs.type(attribute) }} | {{ attribute.brief | trim }}{{ notes.add({"note": attribute.note, "name": attribute.name}) }} | {{ examples.format(attribute) | trim }} | {{ stability.badge(attribute.stability, attribute.deprecated) | trim }} | +{%- endfor %} +{{ notes.render() }} +{%- for enum in group.attributes | 
sort(attribute="name") %} +{%- if enum.type is mapping -%}{{ enums.table(enum, notes) }}{% endif %} +{%- endfor -%} +{%- endfor -%} diff --git a/ibm-mq-metrics/templates/registry/markdown/attribute_table.j2 b/ibm-mq-metrics/templates/registry/markdown/attribute_table.j2 new file mode 100644 index 000000000..b40c54788 --- /dev/null +++ b/ibm-mq-metrics/templates/registry/markdown/attribute_table.j2 @@ -0,0 +1,13 @@ +{% import 'requirement.j2' as requirement %} +{% import 'stability.j2' as stability %} +{% import 'notes.j2' as notes %} +{% import 'attribute_macros.j2' as attrs %} +{% import 'enum_macros.j2' as enums %} +{% import 'sampling_macros.j2' as sampling %} +{% import 'examples_macros.j2' as examples %} +{#- Macro for creating attribute table -#} +{% macro generate(attributes, tag_filter, attribute_registry_base_url, lineage_attributes) %}{% if (tag_filter | length == 0) %}{% set filtered_attributes = attributes %}{% else %}{% set filtered_attributes = attributes | selectattr("tag", "in", tag_filter) %}{% endif %}{% if filtered_attributes | length > 0 %}| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +{% for attribute in filtered_attributes | attribute_sort %}| {{ attrs.name(attribute) }} | {{ attrs.type(attribute) }} | {{ attribute.brief | trim }}{{ notes.add({"note": attribute.note, "name": attribute.name}) }} | {{ examples.format(attribute) | trim }} | {{ requirement.render({"level": attribute.requirement_level, "name": attribute.name}, notes) | trim }} | {{ stability.badge(attribute.stability, attribute.deprecated) | trim }} | +{% endfor %}{{ notes.render() }}{{ sampling.snippet(filtered_attributes, attribute_registry_base_url, lineage_attributes) }}{{ enums.tables(filtered_attributes | selectattr("type", "mapping"), notes) }} +{% endif %}{% endmacro %} diff --git 
a/ibm-mq-metrics/templates/registry/markdown/body_field_table.j2 b/ibm-mq-metrics/templates/registry/markdown/body_field_table.j2 new file mode 100644 index 000000000..b600f6dd7 --- /dev/null +++ b/ibm-mq-metrics/templates/registry/markdown/body_field_table.j2 @@ -0,0 +1,18 @@ +{% import 'requirement.j2' as requirement %} +{% import 'stability.j2' as stability %} +{% import 'notes.j2' as notes %} +{% import 'enum_macros.j2' as enums %} +{% import 'examples_macros.j2' as examples %} +{% macro flatten(fields, ns, depth) %}{% if fields %}{% for f in fields | sort(attribute="id") %} +{% set ns.flat = [ns.flat, [{'field':f,'depth':depth}]] | flatten %}{% if f.fields %}{% set _= flatten(f.fields, ns, depth + 1) %}{% endif %} +{% endfor %}{% endif %}{% endmacro %} +{% macro field_name(field, depth) -%} +{%- set name= " " * 2 * depth ~ '`' ~ field.id ~ '`' -%} +{%- if (field.type == "map") or (field.type == "map[]") %}{{ name ~ ":"}}{% else -%} +{{ name }}{% endif %}{% endmacro %} +{#- Macro for creating body table -#} +{% macro generate(fields) %}{% if (fields | length > 0) %}{% set ns = namespace(flat=[])%}{% set _ = flatten(fields, ns, 0) %}| Body Field | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability | +|---|---|---|---|---|---| +{% for f in ns.flat %}| {{ field_name(f.field, f.depth) }} | {{ f.field.type }} | {{ f.field.brief | trim }}{{ notes.add({"note": f.field.note}) }} | {{ examples.format(f.field) | trim }} | {{ requirement.render({"level": f.field.requirement_level, "name": f.field.id}, notes) | trim }} | {{ stability.badge(f.field.stability, f.field.deprecated) | trim }} | +{% endfor %}{{ notes.render() }}{{ enums.field_tables(ns.flat | map(attribute="field") | selectattr("type", "eq", "enum"), notes) -}} +{%- endif %}{% endmacro %} diff --git a/ibm-mq-metrics/templates/registry/markdown/enum_macros.j2 b/ibm-mq-metrics/templates/registry/markdown/enum_macros.j2 
new file mode 100644 index 000000000..d20327e14 --- /dev/null +++ b/ibm-mq-metrics/templates/registry/markdown/enum_macros.j2 @@ -0,0 +1,30 @@ +{% import 'stability.j2' as stability %} +{% macro filter(member) %}{% if (member.deprecated is none or member.deprecated == "") %}{{ "True" }}{% else %}{{ "False" }}{% endif %}{% endmacro %} +{% macro table(enum, notes) %} +--- + +`{{enum.name}}` has the following list of well-known values. If one of them applies, then the respective value MUST be used; otherwise, a custom value MAY be used. + +| Value | Description | Stability | +|---|---|---| +{% for espec in enum.type.members | sort(attribute='value') %} +{%- if filter(espec) == "True" -%} +| `{{ espec.value }}` | {{ (espec.brief or espec.id) | trim }}{{ notes.add({"note": espec.note}) }} | {{ stability.badge(espec.stability, espec.deprecated) }} | +{% endif %}{% endfor %}{{ notes.render() }}{% endmacro %} +{% macro tables(enums, notes) -%} +{% for enum in enums | sort(attribute="name") -%} +{{ table(enum, notes) -}} +{% endfor %}{% endmacro %} +{% macro field_table(enum, notes) %} +`{{enum.id}}` has the following list of well-known values. If one of them applies, then the respective value MUST be used; otherwise, a custom value MAY be used. 
+ +| Value | Description | Stability | +|---|---|---| +{% for espec in enum.members | sort(attribute='value') %} +{%- if filter(espec) == "True" -%} +| `{{ espec.value }}` | {{ (espec.brief or espec.id) | trim }}{{ notes.add({"note": espec.note}) }} | {{ stability.badge(espec.stability, espec.deprecated) }} | +{% endif %}{% endfor %}{{ notes.render() }}{% endmacro %} +{% macro field_tables(enums, notes) -%} +{% for enum in enums | sort(attribute="id") -%} +{{ field_table(enum, notes) -}} +{% endfor %}{% endmacro %} \ No newline at end of file diff --git a/ibm-mq-metrics/templates/registry/markdown/event_macros.j2 b/ibm-mq-metrics/templates/registry/markdown/event_macros.j2 new file mode 100644 index 000000000..c6ef8ef6f --- /dev/null +++ b/ibm-mq-metrics/templates/registry/markdown/event_macros.j2 @@ -0,0 +1,16 @@ +{#- Macros for simplifying creating "Event" documentation. -#} +{% import 'stability.j2' as stability %} +{% import 'body_field_table.j2' as body_table %} +{% macro header(event) %}**Status:** {{ stability.badge(event.stability, event.deprecated) }} + +The event name MUST be `{{ event.name }}`. 
+ +{{ event.brief | trim }} +{%if event.note %} +{{ event.note | trim }} +{% endif %} +{% endmacro %} +{% macro body(body) %}{% if body %}**Body fields:** + +{{ body_table.generate(body.fields) }} +{% endif %}{% endmacro %} diff --git a/ibm-mq-metrics/templates/registry/markdown/examples_macros.j2 b/ibm-mq-metrics/templates/registry/markdown/examples_macros.j2 new file mode 100644 index 000000000..bbd840d67 --- /dev/null +++ b/ibm-mq-metrics/templates/registry/markdown/examples_macros.j2 @@ -0,0 +1,17 @@ +{% macro print_examples(examples) %}{%- for e in examples %}{%if loop.first == false %}; {% endif %}`{{ e | trim }}`{%- endfor %}{% endmacro %} + +{% macro format(item) %}{%- if item.examples %} +{%- if "[]" in item.type and "template" not in item.type %} +{%- if item.examples is sequence %} +{%- if item.examples | select("sequence") | length == 0 %}`{{ item.examples | trim }}` +{%- else %}{{ print_examples(item.examples) }} +{%- endif %} +{%- else %}`[{{ item.examples | trim }}]` +{%- endif %} +{%- elif item.examples is sequence %}{{ print_examples(item.examples) }} +{%- else %}`{{ item.examples | trim }}` +{%- endif %}{%- elif item.type is mapping %} +{%- for e in item.type.members %}{% if loop.index0 < 3 %}{% if loop.first == false %}; {% endif %}`{{ e.value | trim }}`{% endif %}{%- endfor %} +{%- elif item.type == "enum" -%} +{%- for e in item.members %}{% if loop.index0 < 3 %}{% if loop.first == false %}; {% endif %}`{{ e.value | trim }}`{% endif %}{%- endfor %} +{%- endif %}{% endmacro %} diff --git a/ibm-mq-metrics/templates/registry/markdown/metric_macros.j2 b/ibm-mq-metrics/templates/registry/markdown/metric_macros.j2 new file mode 100644 index 000000000..deae008dd --- /dev/null +++ b/ibm-mq-metrics/templates/registry/markdown/metric_macros.j2 @@ -0,0 +1,8 @@ +{% macro instrument(type) -%} +{%- if type == "gauge" %}Gauge +{% elif type == "counter" %}Counter +{% elif type == "updowncounter" %}UpDownCounter +{% elif type == "histogram" %}Histogram +{% else 
%}{{ type }} +{%- endif %} +{% endmacro %} diff --git a/ibm-mq-metrics/templates/registry/markdown/metric_table.j2 b/ibm-mq-metrics/templates/registry/markdown/metric_table.j2 new file mode 100644 index 000000000..ddbd191c6 --- /dev/null +++ b/ibm-mq-metrics/templates/registry/markdown/metric_table.j2 @@ -0,0 +1,7 @@ +{% import 'stability.j2' as stability %} +{% import 'notes.j2' as notes %} +{% import 'metric_macros.j2' as metrics %} +{% macro generate(group) %}| Name | Instrument Type | Unit (UCUM) | Description | Stability | +| -------- | --------------- | ----------- | -------------- | --------- | +| `{{ group.metric_name }}` | {{ metrics.instrument(group.instrument) | trim }} | `{{ group.unit }}` | {{ group.brief | trim }}{{ notes.add({"note": group.note}) }} | {{ stability.badge(group.stability, group.deprecated) | trim }} | +{{ notes.render() }}{% endmacro %} diff --git a/ibm-mq-metrics/templates/registry/markdown/metrics.md.j2 b/ibm-mq-metrics/templates/registry/markdown/metrics.md.j2 new file mode 100644 index 000000000..0ff8153bc --- /dev/null +++ b/ibm-mq-metrics/templates/registry/markdown/metrics.md.j2 @@ -0,0 +1,14 @@ +{%- import 'attribute_table.j2' as at -%} +{%- import 'metric_table.j2' as mt -%} + +# Produced Metrics + +{% for metric in ctx %} +## Metric `{{metric.metric_name}}` + +{{ mt.generate(metric) }} + +### `{{metric.metric_name}}` Attributes + +{{ at.generate(metric.attributes, "", "", metric.lineage.attributes) }} +{% endfor %} \ No newline at end of file diff --git a/ibm-mq-metrics/templates/registry/markdown/notes.j2 b/ibm-mq-metrics/templates/registry/markdown/notes.j2 new file mode 100644 index 000000000..a96b4270e --- /dev/null +++ b/ibm-mq-metrics/templates/registry/markdown/notes.j2 @@ -0,0 +1,9 @@ +{%- set ns = namespace(notes=[],index=0) -%} +{%- macro add(note) %}{% if note.note %}{% set ns.notes = [ns.notes, [note]] | flatten %} [{{ ns.notes | length + ns.index }}]{% endif %}{% endmacro %} +{%- macro add_with_limit(note) %}{% 
if note.note | length > 50 %}{% set ns.notes = [ns.notes, [note]] | flatten %} [{{ ns.notes | length + ns.index }}]{% elif note.note %} {{ note.note | trim }}{% endif %}{% endmacro %} +{% macro render() %}{% if ns.notes | length > 0 %} +{%- for note in ns.notes %} +{% if note.name %}**[{{ns.index+loop.index}}] `{{note.name}}`:** {{ note.note | trim }}{% else -%}**[{{ns.index+loop.index}}]:** {{ note.note | trim }} {%- endif -%} +{%- if not loop.last -%}{{"\n"}}{%- endif -%} +{% endfor %}{% set ns.index = ns.notes | length + ns.index %}{% set ns.notes = [] %} +{% endif %}{% endmacro %} diff --git a/ibm-mq-metrics/templates/registry/markdown/readme.md.j2 b/ibm-mq-metrics/templates/registry/markdown/readme.md.j2 new file mode 100644 index 000000000..8aea78a9f --- /dev/null +++ b/ibm-mq-metrics/templates/registry/markdown/readme.md.j2 @@ -0,0 +1,42 @@ +{{- template.set_file_name("README.md") -}} + + + + + +# Attribute registry + +The attributes registry is the place where attributes are defined. An attribute definition covers the following properties of an attribute: + +- the `id` (the fully qualified name) of the attribute +- the `type` of the attribute +- the `stability` of the attribute +- a `brief` description of the attribute and optionally a longer `note` +- example values + +Attributes defined in the registry can be used in different semantic conventions. Attributes should be included in this registry before they are used in semantic conventions. Semantic conventions may override all the properties of an attribute except for the `id`, `type` and `stability` in case it's required for a particular context. In addition, semantic conventions specify the requirement level of an attribute in the corresponding context. + +A definition of an attribute in the registry doesn't necessarily imply that the attribute is used in any of the semantic conventions. + +If applicable, application developers are encouraged to use existing attributes from this registry. 
See also [these recommendations][developers recommendations] regarding attribute selection and attribute naming for custom use cases. + +All registered attributes are listed by namespace in this registry. + +> [!WARNING] +> +> The following registry overview is a work in progress. +> +> Further attribute namespaces are currently being migrated and will appear in this registry soon. + +Currently, the following namespaces exist: + +{% for bundle in ctx %} +{%- set my_file_name = bundle.id | kebab_case ~ ".md" -%} +- [{{ bundle.id | title_case | acronym }}]({{ my_file_name }}) +{% endfor %} +[developers recommendations]: ../general/naming.md#recommendations-for-application-developers + diff --git a/ibm-mq-metrics/templates/registry/markdown/requirement.j2 b/ibm-mq-metrics/templates/registry/markdown/requirement.j2 new file mode 100644 index 000000000..1f5714de3 --- /dev/null +++ b/ibm-mq-metrics/templates/registry/markdown/requirement.j2 @@ -0,0 +1,9 @@ +{% macro render(attr, notes) -%} +{%- if attr.level == "recommended" %}`Recommended` +{% elif attr.level == "required" %}`Required` +{% elif attr.level == "opt_in" %}`Opt-In` +{% elif attr.level.conditionally_required %}`Conditionally Required`{{ notes.add_with_limit({"note": attr.level.conditionally_required, "name": attr.name}) }} +{% elif attr.level.recommended %}`Recommended`{{ notes.add_with_limit({"note": attr.level.recommended, "name": attr.name}) }} +{% else %}{{ level }} +{%- endif %} +{% endmacro %} diff --git a/ibm-mq-metrics/templates/registry/markdown/resource_macros.j2 b/ibm-mq-metrics/templates/registry/markdown/resource_macros.j2 new file mode 100644 index 000000000..f19680cbd --- /dev/null +++ b/ibm-mq-metrics/templates/registry/markdown/resource_macros.j2 @@ -0,0 +1,16 @@ +{#- Macros for simplifying creating "Resource" documentation. 
-#} +{% import 'stability.j2' as stability %} +{% macro real_stability(resource) %} +{% if resource.attributes | map(attribute='stability') | unique | length > 1 -%} +{{ stability.badge("mixed", "") }} +{%- else -%} +{{ stability.badge(resource.stability, resource.deprecated) }} +{%- endif %} +{% endmacro %} +{% macro header(resource) %} +**Status:** {{ real_stability(resource) | trim }} + +**type:** `{{ resource.name }}` + +**Description:** {{ resource.brief }} +{% endmacro %} \ No newline at end of file diff --git a/ibm-mq-metrics/templates/registry/markdown/sampling_macros.j2 b/ibm-mq-metrics/templates/registry/markdown/sampling_macros.j2 new file mode 100644 index 000000000..07929e7d4 --- /dev/null +++ b/ibm-mq-metrics/templates/registry/markdown/sampling_macros.j2 @@ -0,0 +1,7 @@ +{% import 'attribute_macros.j2' as attrs %} +{% macro snippet(attributes, attribute_registry_base_url, lineage_attributes) %}{% set sampling_attributes = attributes | selectattr("sampling_relevant", "true") %}{% if sampling_attributes | length > 0 %} +The following attributes can be important for making sampling decisions +and SHOULD be provided **at span creation time** (if provided at all): + +{% for attribute in sampling_attributes | sort(attribute="name") %}* {{ attrs.name_with_link(attribute, attribute_registry_base_url, lineage_attributes) }} +{% endfor %}{% endif %}{% endmacro %} diff --git a/ibm-mq-metrics/templates/registry/markdown/snippet.md.j2 b/ibm-mq-metrics/templates/registry/markdown/snippet.md.j2 new file mode 100644 index 000000000..6b225c811 --- /dev/null +++ b/ibm-mq-metrics/templates/registry/markdown/snippet.md.j2 @@ -0,0 +1,32 @@ + + + + + +{%- import 'attribute_table.j2' as at -%} +{%- import 'metric_table.j2' as mt -%} +{%- import 'event_macros.j2' as event -%} +{%- import 'resource_macros.j2' as resource %} + +{% macro generate_event(group) -%} +{{ event.header(group) }}{{ generate_attributes(group) }}{{ event.body(group.body) }}{% endmacro -%} +{%- macro 
generate_resource(group) -%} +{{ resource.header(group) }}{{ generate_attributes(group) }}{% endmacro -%} +{%- macro generate_metric(group) -%} +{{ mt.generate(group) }} +{{ generate_attributes(group) }}{% endmacro -%} +{%- macro generate_attributes(group) -%} +{{ at.generate(group.attributes, tag_filter, attribute_registry_base_url, group.lineage.attributes) }}{% endmacro -%} + +{% if group.type == "event" -%} +{{ generate_event(group) -}} +{%- elif group.type == "resource" -%} +{{ generate_resource(group) }} +{%- elif group.type == "metric" -%} +{{ generate_metric(group) }} +{%- else -%} +{{ generate_attributes(group) -}} +{% endif -%} + + + diff --git a/ibm-mq-metrics/templates/registry/markdown/stability.j2 b/ibm-mq-metrics/templates/registry/markdown/stability.j2 new file mode 100644 index 000000000..cc9ed7bde --- /dev/null +++ b/ibm-mq-metrics/templates/registry/markdown/stability.j2 @@ -0,0 +1,11 @@ +{% macro badge(stability, deprecated) -%} +{%- if deprecated %}![Deprecated](https://img.shields.io/badge/-deprecated-red)
{{ deprecated.note | trim }} +{%- elif stability == "mixed" %}![Mixed](https://img.shields.io/badge/-mixed-yellow) +{%- elif stability == "stable" %}![Stable](https://img.shields.io/badge/-stable-lightgreen) +{%- elif stability == "release_candidate" %}![Release Candidate](https://img.shields.io/badge/-rc-mediumorchid) +{%- elif stability == "deprecated" %}![Deprecated](https://img.shields.io/badge/-deprecated-red) +{%- elif stability == "experimental" %}![Development](https://img.shields.io/badge/-development-blue) +{%- elif stability == "development" %}![Development](https://img.shields.io/badge/-development-blue) +{%- else %}{{ "Unknown stability." }} +{%- endif %} +{%- endmacro %} diff --git a/ibm-mq-metrics/templates/registry/markdown/weaver.yaml b/ibm-mq-metrics/templates/registry/markdown/weaver.yaml new file mode 100644 index 000000000..6a1dd65ad --- /dev/null +++ b/ibm-mq-metrics/templates/registry/markdown/weaver.yaml @@ -0,0 +1,4 @@ +templates: + - pattern: metrics.md.j2 + filter: '.groups | map(select(.type == "metric"))' + application_mode: single \ No newline at end of file diff --git a/ibm-mq-metrics/templates/registry/yaml/config.yml.j2 b/ibm-mq-metrics/templates/registry/yaml/config.yml.j2 new file mode 100644 index 000000000..5f23b27d0 --- /dev/null +++ b/ibm-mq-metrics/templates/registry/yaml/config.yml.j2 @@ -0,0 +1,125 @@ +# This section defines the schedule at which the program will scrape metrics. +taskSchedule: + numberOfThreads: 20 + initialDelaySeconds: 0 + taskDelaySeconds: 60 + +#This is the timeout on queue metrics and channel metrics threads.Default value is 20 seconds. +#No need to change the default unless you know what you are doing. 
+#queueMetricsCollectionTimeoutInSeconds: 40 +#channelMetricsCollectionTimeoutInSeconds: 40 +#topicMetricsCollectionTimeoutInSeconds: 40 + +queueManagers: + - name: "QM1" + host: "localhost" + port: 1414 + + # Indicate the MaxActiveChannels as set in qm.ini, see https://www.ibm.com/docs/en/ibm-mq/9.3.x?topic=qmini-channels-stanza-file + maxActiveChannels: 4200 + + #The transport type for the queue manager connection, the default is "Bindings" for a binding type connection + #For bindings type, connection WMQ extension (i.e machine agent) need to be on the same machine on which WebbsphereMQ server is running + #For client type, connection change it to "Client". + transportType: "Bindings" + + #Channel name of the queue manager, channel should be server-conn type. + #This field is not required in case of transportType: Bindings + #channelName: "SYSTEM.ADMIN.SVRCONN" + + #for user access level, please check "Access Permissions" section on the extensions page + #comment out the username and password in case of transportType: Bindings. + #username: "" + #password: "" + + #PCF requests are always sent to SYSTEM.ADMIN.COMMAND.QUEUE. The PCF responses to these requests are sent to the default reply-to queue called + #SYSTEM.DEFAULT.MODEL.QUEUE. However, you can override this behavior and send it to a temporary dynamic queue by changing the modelQueueName and replyQueuePrefix fields. + #For more details around this https://www.ibm.com/support/knowledgecenter/SSFKSJ_7.5.0/com.ibm.mq.ref.adm.doc/q083240_.htm & https://www.ibm.com/support/knowledgecenter/SSFKSJ_8.0.0/com.ibm.mq.adm.doc/q020010_.htm + #modelQueueName: "" + #replyQueuePrefix: "" + + # Name of the temporary dynamic queue holding the configuration events. This queue contains information regarding the configuration of the queue manager, notable MaxChannels and MaxActiveChannels. + # If unset, the default queue name `SYSTEM.ADMIN.CONFIG.EVENT` is applied. 
+ # Configuration events need to be enabled explicitly in the queue manager configuration. See https://www.ibm.com/docs/en/ibm-mq/9.4.x?topic=monitoring-configuration-events for reference. + #configurationQueueName: "SYSTEM.ADMIN.CONFIG.EVENT" + + # Interval in milliseconds at which the configuration events in the configuration queue can be consumed. + # By default, no events are consumed. + #consumeConfigurationEventInterval: 600000 # 10 minutes + + # Enable running a queue manager refresh request to reload its configuration and create a configuration event. + # This action is only executed if no configuration events are found when reading the configuration queue.name: + # By default, this action is disabled. + #refreshQueueManagerConfigurationEnabled: false + + #Sets the CCSID used in the message descriptor of request and response messages. The default value is MQC.MQCCSI_Q_MGR. + #To set this, please use the integer value. + #ccsid: + + #Sets the encoding used in the message descriptor of request and response messages. The default value is MQC.MQENC_NATIVE. + #To set this, please use the integer value. + #encoding: + + # IBM Cipher Suite e.g. "SSL_RSA_WITH_AES_128_CBC_SHA256".. + # For translation to IBM Cipher http://www.ibm.com/support/knowledgecenter/SSFKSJ_8.0.0/com.ibm.mq.dev.doc/q113210_.htm + # A cipher working for IBM Cloud MQ and Temurin JDK 8 is TLS_AES_128_GCM_SHA256 + #cipherSuite: "" + + queueFilters: + #Can provide complete queue name or generic names. A generic name is a character string followed by an asterisk (*), + #for example ABC*, and it selects all objects having names that start with the selected character string. + #An asterisk on its own matches all possible names. 
+ include: ["*"] + exclude: + #type value: STARTSWITH, EQUALS, ENDSWITH, CONTAINS + - type: "STARTSWITH" + #The name of the queue or queue name pattern as per queue filter, comma separated values + values: ["SYSTEM","AMQ"] + + + channelFilters: + #Can provide complete channel name or generic names. A generic name is a character string followed by an asterisk (*), + #for example ABC*, and it selects all objects having names that start with the selected character string. + #An asterisk on its own matches all possible names. + include: ["*"] + exclude: + #type value: STARTSWITH, EQUALS, ENDSWITH, CONTAINS + - type: "STARTSWITH" + #The name of the queue or queue name pattern as per queue filter, comma separated values + values: ["SYSTEM"] + + listenerFilters: + #Can provide complete channel name or generic names. A generic name is a character string followed by an asterisk (*), + #for example ABC*, and it selects all objects having names that start with the selected character string. + #An asterisk on its own matches all possible names. 
+ include: ["*"] + exclude: + #type value: STARTSWITH, EQUALS, ENDSWITH, CONTAINS + - type: "STARTSWITH" + #The name of the queue or queue name pattern as per queue filter, comma separated values + values: ["SYSTEM"] + + topicFilters: + # For topics, IBM MQ uses the topic wildcard characters ('#' and '+') and does not treat a trailing asterisk as a wildcard + # https://www.ibm.com/support/knowledgecenter/en/SSFKSJ_7.5.0/com.ibm.mq.pla.doc/q005020_.htm + include: ["#"] + exclude: + #type value: STARTSWITH, EQUALS, ENDSWITH, CONTAINS + - type: "STARTSWITH" + #The name of the queue or queue name pattern as per queue filter, comma separated values + values: ["SYSTEM","$SYS"] + +metrics: +{% for metric in ctx %} {{ metric.metric_name }}: # {{ metric.brief | safe }} + enabled: true +{% endfor %} +sslConnection: + trustStorePath: "" + trustStorePassword: "" + + keyStorePath: "" + keyStorePassword: "" + +# Configure the OTLP exporter using system properties keys following the specification https://opentelemetry.io/docs/languages/java/configuration/ +otlpExporter: + otel.exporter.otlp.endpoint: http://localhost:4318 diff --git a/ibm-mq-metrics/templates/registry/yaml/weaver.yaml b/ibm-mq-metrics/templates/registry/yaml/weaver.yaml new file mode 100644 index 000000000..43a70f633 --- /dev/null +++ b/ibm-mq-metrics/templates/registry/yaml/weaver.yaml @@ -0,0 +1,4 @@ +templates: + - pattern: config.yml.j2 + filter: '.groups | map(select(.type == "metric"))' + application_mode: single \ No newline at end of file diff --git a/ibm-mq-metrics/weaver.Dockerfile b/ibm-mq-metrics/weaver.Dockerfile new file mode 100644 index 000000000..92fcad00e --- /dev/null +++ b/ibm-mq-metrics/weaver.Dockerfile @@ -0,0 +1,6 @@ +# DO NOT BUILD +# This file is just for tracking dependencies of the semantic convention build. +# Dependabot can keep this file up to date with latest containers. + +# Weaver is used to generate markdown docs, and enforce policies on the model and run integration tests. 
+FROM otel/weaver:v0.18.0@sha256:5425ade81dc22ddd840902b0638b4b6a9186fb654c5b50c1d1ccd31299437390 AS weaver \ No newline at end of file diff --git a/inferred-spans/README.md b/inferred-spans/README.md index 9b32f51af..667f0d201 100644 --- a/inferred-spans/README.md +++ b/inferred-spans/README.md @@ -41,7 +41,6 @@ So if you are using an autoconfigured OpenTelemetry SDK, you'll only need to add | otel.inferred.spans.interval
OTEL_INFERRED_SPANS_INTERVAL | `5s` | The interval at which profiling sessions should be started. | | otel.inferred.spans.duration
OTEL_INFERRED_SPANS_DURATION | `5s` | The duration of a profiling session. For sampled transactions which fall within a profiling session (they start after and end before the session), so-called inferred spans will be created. They appear in the trace waterfall view like regular spans.
NOTE: It is not recommended to set much higher durations as it may fill the activation events file and async-profiler's frame buffer. Warnings will be logged if the activation events file is full. If you want to have more profiling coverage, try decreasing `profiling_inferred_spans_interval` | | otel.inferred.spans.lib.directory
OTEL_INFERRED_SPANS_LIB_DIRECTORY | Defaults to the value of `java.io.tmpdir` | Profiling requires that the [async-profiler](https://github.com/async-profiler/async-profiler) shared library is exported to a temporary location and loaded by the JVM. The partition backing this location must be executable, however in some server-hardened environments, `noexec` may be set on the standard `/tmp` partition, leading to `java.lang.UnsatisfiedLinkError` errors. Set this property to an alternative directory (e.g. `/var/tmp`) to resolve this. | -| otel.inferred.spans.duration
OTEL_INFERRED_SPANS_DURATION | `5s` | The duration of a profiling session. For sampled transactions which fall within a profiling session (they start after and end before the session), so-called inferred spans will be created. They appear in the trace waterfall view like regular spans.
NOTE: It is not recommended to set much higher durations as it may fill the activation events file and async-profiler's frame buffer. Warnings will be logged if the activation events file is full. If you want to have more profiling coverage, try decreasing `profiling_inferred_spans_interval` | | otel.inferred.spans.parent.override.handler
OTEL_INFERRED_SPANS_PARENT_OVERRIDE_HANDLER | Defaults to a handler adding span-links to the inferred span | Inferred spans sometimes need to be inserted as the new parent of a normal span, which is not directly possible because that span has already been sent. For this reason, this relationship needs to be represented differently, which normally is done by adding a span-link to the inferred span. This configuration can be used to override that behaviour by providing the fully qualified name of a class implementing `BiConsumer`: The biconsumer will be invoked with the inferred span as first argument and the span for which the inferred one was detected as new parent as second argument | ### Manual SDK setup diff --git a/inferred-spans/build.gradle.kts b/inferred-spans/build.gradle.kts index dba8e0334..98d5e33a3 100644 --- a/inferred-spans/build.gradle.kts +++ b/inferred-spans/build.gradle.kts @@ -13,6 +13,7 @@ dependencies { compileOnly("com.google.auto.service:auto-service-annotations") compileOnly("io.opentelemetry:opentelemetry-sdk") compileOnly("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure-spi") + compileOnly("io.opentelemetry.semconv:opentelemetry-semconv") implementation("com.lmax:disruptor") implementation("org.jctools:jctools-core") implementation("tools.profiler:async-profiler") @@ -21,7 +22,7 @@ dependencies { testAnnotationProcessor("com.google.auto.service:auto-service") testCompileOnly("com.google.auto.service:auto-service-annotations") - testImplementation("io.opentelemetry.semconv:opentelemetry-semconv-incubating") + testImplementation("io.opentelemetry.semconv:opentelemetry-semconv") testImplementation("io.opentelemetry:opentelemetry-sdk") testImplementation("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure") testImplementation("io.opentelemetry:opentelemetry-sdk-testing") @@ -42,4 +43,8 @@ tasks { } } } + + withType().configureEach { + 
jvmArgs("-Djava.util.logging.config.file=${project.projectDir.resolve("src/test/resources/logging.properties")}") + } } diff --git a/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/InferredSpans.java b/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/InferredSpans.java new file mode 100644 index 000000000..76f55db83 --- /dev/null +++ b/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/InferredSpans.java @@ -0,0 +1,51 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.inferredspans; + +import java.time.Duration; +import javax.annotation.Nullable; + +/** + * A global accessor for the {@link InferredSpansProcessor} instance. + * + *

This class is for internal use only and may be removed in a future release. + */ +public final class InferredSpans { + + @Nullable private static volatile InferredSpansProcessor instance; + + private InferredSpans() {} + + /** + * Sets the {@link InferredSpansProcessor} instance. + * + * @param processor the processor instance + */ + public static void setInstance(@Nullable InferredSpansProcessor processor) { + instance = processor; + } + + /** + * Returns whether inferred spans are enabled. + * + * @return whether inferred spans are enabled + */ + public static boolean isEnabled() { + return instance != null; + } + + /** + * Sets the profiler interval. + * + * @param interval the new profiler interval + */ + public static void setProfilerInterval(Duration interval) { + InferredSpansProcessor p = instance; + if (p != null) { + p.setProfilerInterval(interval); + } + } +} diff --git a/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/InferredSpansAutoConfig.java b/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/InferredSpansAutoConfig.java index 6a4e1ba00..7ee78943e 100644 --- a/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/InferredSpansAutoConfig.java +++ b/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/InferredSpansAutoConfig.java @@ -5,6 +5,8 @@ package io.opentelemetry.contrib.inferredspans; +import static java.util.stream.Collectors.toList; + import com.google.auto.service.AutoService; import io.opentelemetry.api.trace.SpanBuilder; import io.opentelemetry.api.trace.SpanContext; @@ -17,7 +19,6 @@ import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.logging.Logger; -import java.util.stream.Collectors; import javax.annotation.Nullable; @AutoService(AutoConfigurationCustomizerProvider.class) @@ -90,35 +91,34 @@ private static class PropertiesApplier { private final ConfigProperties properties; - public PropertiesApplier(ConfigProperties properties) 
{ + PropertiesApplier(ConfigProperties properties) { this.properties = properties; } - public void applyBool(String configKey, Consumer funcToApply) { + void applyBool(String configKey, Consumer funcToApply) { applyValue(properties.getBoolean(configKey), funcToApply); } - public void applyInt(String configKey, Consumer funcToApply) { + void applyInt(String configKey, Consumer funcToApply) { applyValue(properties.getInt(configKey), funcToApply); } - public void applyDuration(String configKey, Consumer funcToApply) { + void applyDuration(String configKey, Consumer funcToApply) { applyValue(properties.getDuration(configKey), funcToApply); } - public void applyString(String configKey, Consumer funcToApply) { + void applyString(String configKey, Consumer funcToApply) { applyValue(properties.getString(configKey), funcToApply); } - public void applyWildcards( - String configKey, Consumer> funcToApply) { + void applyWildcards(String configKey, Consumer> funcToApply) { String wildcardListString = properties.getString(configKey); if (wildcardListString != null && !wildcardListString.isEmpty()) { List values = Arrays.stream(wildcardListString.split(",")) .filter(str -> !str.isEmpty()) .map(WildcardMatcher::valueOf) - .collect(Collectors.toList()); + .collect(toList()); if (!values.isEmpty()) { funcToApply.accept(values); } diff --git a/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/InferredSpansProcessor.java b/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/InferredSpansProcessor.java index 22f59ba53..19baf3174 100644 --- a/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/InferredSpansProcessor.java +++ b/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/InferredSpansProcessor.java @@ -5,6 +5,8 @@ package io.opentelemetry.contrib.inferredspans; +import static java.util.Objects.requireNonNull; + import io.opentelemetry.api.GlobalOpenTelemetry; import io.opentelemetry.api.trace.Tracer; import 
io.opentelemetry.api.trace.TracerProvider; @@ -19,7 +21,7 @@ import java.io.File; import java.io.IOException; import java.io.InputStream; -import java.util.Objects; +import java.time.Duration; import java.util.Properties; import java.util.concurrent.Executors; import java.util.concurrent.ThreadFactory; @@ -33,14 +35,13 @@ public class InferredSpansProcessor implements SpanProcessor { private static final Logger logger = Logger.getLogger(InferredSpansProcessor.class.getName()); public static final String TRACER_NAME = "inferred-spans"; - public static final String TRACER_VERSION = readInferredSpansVersion(); // Visible for testing final SamplingProfiler profiler; + private final InferredSpansConfiguration config; private Supplier tracerProvider = GlobalOpenTelemetry::getTracerProvider; - @Nullable private volatile Tracer tracer; InferredSpansProcessor( @@ -49,12 +50,18 @@ public class InferredSpansProcessor implements SpanProcessor { boolean startScheduledProfiling, @Nullable File activationEventsFile, @Nullable File jfrFile) { + this.config = config; profiler = new SamplingProfiler(config, clock, this::getTracer, activationEventsFile, jfrFile); if (startScheduledProfiling) { profiler.start(); } } + public void setProfilerInterval(Duration interval) { + config.setProfilerInterval(interval); + profiler.reschedule(); + } + public static InferredSpansProcessorBuilder builder() { return new InferredSpansProcessorBuilder(); } @@ -134,7 +141,7 @@ private static String readInferredSpansVersion() { Properties properties = new Properties(); properties.load(is); String version = (String) properties.get("contrib.version"); - Objects.requireNonNull(version); + requireNonNull(version); return version; } catch (IOException e) { throw new IllegalStateException(e); diff --git a/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/InferredSpansProcessorBuilder.java b/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/InferredSpansProcessorBuilder.java 
index b464f0f42..e8f52ed68 100644 --- a/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/InferredSpansProcessorBuilder.java +++ b/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/InferredSpansProcessorBuilder.java @@ -5,6 +5,8 @@ package io.opentelemetry.contrib.inferredspans; +import static java.util.Arrays.asList; + import io.opentelemetry.api.trace.SpanBuilder; import io.opentelemetry.api.trace.SpanContext; import io.opentelemetry.contrib.inferredspans.internal.CallTree; @@ -12,7 +14,6 @@ import io.opentelemetry.contrib.inferredspans.internal.SpanAnchoredClock; import java.io.File; import java.time.Duration; -import java.util.Arrays; import java.util.List; import java.util.function.BiConsumer; import javax.annotation.Nullable; @@ -27,7 +28,7 @@ public class InferredSpansProcessorBuilder { private Duration inferredSpansMinDuration = Duration.ZERO; private List includedClasses = WildcardMatcher.matchAllList(); private List excludedClasses = - Arrays.asList( + asList( WildcardMatcher.caseSensitiveMatcher("java.*"), WildcardMatcher.caseSensitiveMatcher("javax.*"), WildcardMatcher.caseSensitiveMatcher("sun.*"), @@ -43,13 +44,11 @@ public class InferredSpansProcessorBuilder { WildcardMatcher.caseSensitiveMatcher("io.undertow.*")); private Duration profilerInterval = Duration.ofSeconds(5); private Duration profilingDuration = Duration.ofSeconds(5); - @Nullable private String profilerLibDirectory = null; // The following options are only intended to be modified in tests private SpanAnchoredClock clock = new SpanAnchoredClock(); private boolean startScheduledProfiling = true; - @Nullable private File activationEventsFile = null; @Nullable private File jfrFile = null; private BiConsumer parentOverrideHandler = @@ -72,8 +71,11 @@ public InferredSpansProcessor build() { profilingDuration, profilerLibDirectory, parentOverrideHandler); - return new InferredSpansProcessor( - config, clock, startScheduledProfiling, activationEventsFile, 
jfrFile); + InferredSpansProcessor processor = + new InferredSpansProcessor( + config, clock, startScheduledProfiling, activationEventsFile, jfrFile); + InferredSpans.setInstance(processor); + return processor; } /** diff --git a/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/WildcardMatcher.java b/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/WildcardMatcher.java index b6b788c55..7e82a0ec1 100644 --- a/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/WildcardMatcher.java +++ b/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/WildcardMatcher.java @@ -5,8 +5,9 @@ package io.opentelemetry.contrib.inferredspans; +import static java.util.Collections.singletonList; + import java.util.ArrayList; -import java.util.Collections; import java.util.List; import javax.annotation.Nullable; @@ -32,7 +33,7 @@ public abstract class WildcardMatcher { private static final String CASE_SENSITIVE_PREFIX = "(?-i)"; private static final String WILDCARD = "*"; private static final WildcardMatcher MATCH_ALL = valueOf(WILDCARD); - private static final List MATCH_ALL_LIST = Collections.singletonList(MATCH_ALL); + private static final List MATCH_ALL_LIST = singletonList(MATCH_ALL); public static WildcardMatcher caseSensitiveMatcher(String matcher) { return valueOf(CASE_SENSITIVE_PREFIX + matcher); diff --git a/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/internal/CallTree.java b/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/internal/CallTree.java index 74468a3fd..a4a59a037 100644 --- a/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/internal/CallTree.java +++ b/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/internal/CallTree.java @@ -5,7 +5,6 @@ package io.opentelemetry.contrib.inferredspans.internal; -import static io.opentelemetry.contrib.inferredspans.internal.semconv.Attributes.CODE_STACKTRACE; import static 
io.opentelemetry.contrib.inferredspans.internal.semconv.Attributes.LINK_IS_CHILD; import static io.opentelemetry.contrib.inferredspans.internal.semconv.Attributes.SPAN_IS_INFERRED; import static java.util.logging.Level.FINE; @@ -20,6 +19,7 @@ import io.opentelemetry.contrib.inferredspans.internal.pooling.ObjectPool; import io.opentelemetry.contrib.inferredspans.internal.pooling.Recyclable; import io.opentelemetry.contrib.inferredspans.internal.util.HexUtils; +import io.opentelemetry.semconv.CodeAttributes; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -515,7 +515,7 @@ protected Span asSpan( assert this.parent != null; tempBuilder.setLength(0); this.parent.fillStackTrace(tempBuilder); - spanBuilder.setAttribute(CODE_STACKTRACE, tempBuilder.toString()); + spanBuilder.setAttribute(CodeAttributes.CODE_STACKTRACE, tempBuilder.toString()); } Span span = spanBuilder.startSpan(); diff --git a/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/internal/InferredSpansConfiguration.java b/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/internal/InferredSpansConfiguration.java index 5091a36a5..819a5b8cf 100644 --- a/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/internal/InferredSpansConfiguration.java +++ b/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/internal/InferredSpansConfiguration.java @@ -23,7 +23,7 @@ public class InferredSpansConfiguration { private final Duration inferredSpansMinDuration; private final List includedClasses; private final List excludedClasses; - private final Duration profilerInterval; + private volatile Duration profilerInterval; private final Duration profilingDuration; @Nullable private final String profilerLibDirectory; private final BiConsumer parentOverrideHandler; @@ -84,6 +84,10 @@ public Duration getProfilingInterval() { return profilerInterval; } + public void setProfilerInterval(Duration profilerInterval) { + 
this.profilerInterval = profilerInterval; + } + public Duration getProfilingDuration() { return profilingDuration; } diff --git a/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/internal/SamplingProfiler.java b/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/internal/SamplingProfiler.java index d05496084..27d0e5305 100644 --- a/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/internal/SamplingProfiler.java +++ b/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/internal/SamplingProfiler.java @@ -39,6 +39,7 @@ import java.util.Locale; import java.util.Objects; import java.util.concurrent.Executors; +import java.util.concurrent.Future; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.LockSupport; @@ -151,6 +152,7 @@ public class SamplingProfiler implements Runnable { private final Supplier tracerProvider; private final AsyncProfiler profiler; + @Nullable private volatile Future profilingTask; /** * Creates a sampling profiler, optionally relying on existing files. 
@@ -385,7 +387,7 @@ public void run() { if (!interrupted && !scheduler.isShutdown()) { long delay = config.getProfilingInterval().toMillis() - profilingDuration.toMillis(); - scheduler.schedule(this, delay, TimeUnit.MILLISECONDS); + profilingTask = scheduler.schedule(this, delay, TimeUnit.MILLISECONDS); } } @@ -426,7 +428,7 @@ private void profile(Duration profilingDuration) throws Exception { String createStartCommand() { StringBuilder startCommand = - new StringBuilder("start,jfr,clock=m,event=wall,cstack=n,interval=") + new StringBuilder("start,jfr,clock=m,event=wall,nobatch,cstack=n,interval=") .append(config.getSamplingInterval().toMillis()) .append("ms,filter,file=") .append(jfrFile) @@ -723,7 +725,19 @@ public void copyFromFiles(Path activationEvents, Path traces) throws IOException @SuppressWarnings("FutureReturnValueIgnored") public void start() { - scheduler.submit(this); + profilingTask = scheduler.submit(this); + } + + @SuppressWarnings({"FutureReturnValueIgnored", "Interruption"}) + public void reschedule() { + Future future = this.profilingTask; + if (future != null) { + if (future.cancel(true)) { + Duration profilingDuration = config.getProfilingDuration(); + long delay = config.getProfilingInterval().toMillis() - profilingDuration.toMillis(); + profilingTask = scheduler.schedule(this, delay, TimeUnit.MILLISECONDS); + } + } } public void stop() throws InterruptedException, IOException { @@ -806,7 +820,7 @@ public int compareTo(StackTraceEvent o) { } private static class ActivationEvent { - public static final int SERIALIZED_SIZE = + static final int SERIALIZED_SIZE = Long.SIZE / Byte.SIZE + // timestamp TraceContext.SERIALIZED_LENGTH @@ -826,7 +840,7 @@ private static class ActivationEvent { private long threadId; private boolean activation; - public void activation( + void activation( Span context, long threadId, @Nullable Span previousContext, @@ -835,7 +849,7 @@ public void activation( set(context, threadId, /* activation= */ true, 
previousContext, nanoTime, clock); } - public void deactivation( + void deactivation( Span context, long threadId, @Nullable Span previousContext, @@ -864,7 +878,7 @@ private void set( this.timestamp = nanoTime; } - public void handle(SamplingProfiler samplingProfiler) { + void handle(SamplingProfiler samplingProfiler) { if (logger.isLoggable(Level.FINE)) { logger.log( Level.FINE, @@ -975,7 +989,7 @@ private void stopProfiling(SamplingProfiler samplingProfiler) { } } - public void serialize(ByteBuffer buf) { + void serialize(ByteBuffer buf) { buf.putLong(timestamp); buf.put(traceContextBuffer); buf.put(previousContextBuffer); @@ -984,7 +998,7 @@ public void serialize(ByteBuffer buf) { buf.put(activation ? (byte) 1 : (byte) 0); } - public void deserialize(ByteBuffer buf) { + void deserialize(ByteBuffer buf) { timestamp = buf.getLong(); buf.get(traceContextBuffer); buf.get(previousContextBuffer); diff --git a/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/internal/asyncprofiler/JfrParser.java b/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/internal/asyncprofiler/JfrParser.java index c8ee4013f..44c6f9c13 100644 --- a/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/internal/asyncprofiler/JfrParser.java +++ b/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/internal/asyncprofiler/JfrParser.java @@ -199,6 +199,10 @@ private void parseConstantPool() throws IOException { case ContentTypeId.CONTENT_SYMBOL: readSymbolConstants(count); break; + case ContentTypeId.CONTENT_STRING: + // ignore empty string + bufferedFile.skip(2); + break; default: throw new IllegalStateException("Unhandled constant pool type: " + typeId); } @@ -499,5 +503,6 @@ private ContentTypeId() {} static final int CONTENT_FRAME_TYPE = 24; static final int CONTENT_GC_WHEN = 32; static final int CONTENT_PACKAGE = 30; + static final int CONTENT_STRING = 20; } } diff --git 
a/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/internal/semconv/Attributes.java b/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/internal/semconv/Attributes.java index dddb3a738..c10662231 100644 --- a/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/internal/semconv/Attributes.java +++ b/inferred-spans/src/main/java/io/opentelemetry/contrib/inferredspans/internal/semconv/Attributes.java @@ -11,8 +11,6 @@ public class Attributes { private Attributes() {} - public static final AttributeKey CODE_STACKTRACE = - AttributeKey.stringKey("code.stacktrace"); public static final AttributeKey LINK_IS_CHILD = AttributeKey.booleanKey("is_child"); public static final AttributeKey SPAN_IS_INFERRED = AttributeKey.booleanKey("is_inferred"); diff --git a/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/InferredSpansAutoConfigTest.java b/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/InferredSpansAutoConfigTest.java index 1b6a7e3e2..b5ce0c650 100644 --- a/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/InferredSpansAutoConfigTest.java +++ b/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/InferredSpansAutoConfigTest.java @@ -7,6 +7,7 @@ import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.assertThat; import static org.awaitility.Awaitility.await; +import static org.junit.jupiter.api.condition.OS.WINDOWS; import io.opentelemetry.api.GlobalOpenTelemetry; import io.opentelemetry.api.OpenTelemetry; @@ -31,10 +32,10 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.condition.DisabledOnOs; -import org.junit.jupiter.api.condition.OS; import org.junit.jupiter.api.io.TempDir; -public class InferredSpansAutoConfigTest { +@DisabledOnOs(WINDOWS) // Uses async-profiler, which is not supported on Windows +class InferredSpansAutoConfigTest { @BeforeEach @AfterEach @@ -98,7 +99,7 
@@ public void checkAllOptions(@TempDir Path tmpDir) { } @Test - public void checkDisabledbyDefault() { + void checkDisabledbyDefault() { try (AutoConfigTestProperties props = new AutoConfigTestProperties()) { OpenTelemetry otel = GlobalOpenTelemetry.get(); List processors = OtelReflectionUtils.getSpanProcessors(otel); @@ -107,9 +108,8 @@ public void checkDisabledbyDefault() { } @DisabledOnOpenJ9 - @DisabledOnOs(OS.WINDOWS) @Test - public void checkProfilerWorking() { + void checkProfilerWorking() { try (AutoConfigTestProperties props = new AutoConfigTestProperties() .put(InferredSpansAutoConfig.ENABLED_OPTION, "true") diff --git a/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/InferredSpansTest.java b/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/InferredSpansTest.java new file mode 100644 index 000000000..e0de5997e --- /dev/null +++ b/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/InferredSpansTest.java @@ -0,0 +1,88 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.inferredspans; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.awaitility.Awaitility.await; + +import io.opentelemetry.contrib.inferredspans.internal.SamplingProfiler; +import io.opentelemetry.contrib.inferredspans.internal.util.DisabledOnOpenJ9; +import java.time.Duration; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.condition.DisabledOnOs; +import org.junit.jupiter.api.condition.OS; + +@DisabledOnOs(OS.WINDOWS) +@DisabledOnOpenJ9 +class InferredSpansTest { + + private ProfilerTestSetup setup; + + @BeforeEach + void setUp() { + InferredSpans.setInstance(null); + } + + @AfterEach + void tearDown() { + if (setup != null) { + setup.close(); + } + InferredSpans.setInstance(null); + } + + @Test + void testIsEnabled() { + 
assertThat(InferredSpans.isEnabled()).isFalse(); + + setup = ProfilerTestSetup.create(c -> {}); + + assertThat(InferredSpans.isEnabled()).isTrue(); + + setup.close(); + setup = null; + + // In a real-world scenario, the close() method would lead to the processor being garbage + // collected, but to make it deterministic, we manually set the instance to null + InferredSpans.setInstance(null); + assertThat(InferredSpans.isEnabled()).isFalse(); + } + + @Test + void testSetProfilerIntervalWhenDisabled() { + InferredSpans.setProfilerInterval(Duration.ofMillis(10)); + + setup = + ProfilerTestSetup.create( + c -> + c.profilerInterval(Duration.ofSeconds(10)) + .profilingDuration(Duration.ofMillis(500))); + + // assert that the interval set before the profiler was initialized is ignored + assertThat(setup.profiler.getConfig().getProfilingInterval()).isEqualTo(Duration.ofSeconds(10)); + } + + @Test + void testSetProfilerInterval() { + setup = + ProfilerTestSetup.create( + c -> + c.profilerInterval(Duration.ofSeconds(10)) + .profilingDuration(Duration.ofMillis(500))); + + SamplingProfiler profiler = setup.profiler; + await() + .untilAsserted(() -> assertThat(profiler.getProfilingSessions()).isGreaterThanOrEqualTo(1)); + + InferredSpans.setProfilerInterval(Duration.ofMillis(100)); + + await() + .timeout(Duration.ofSeconds(2)) + .untilAsserted(() -> assertThat(profiler.getProfilingSessions()).isGreaterThanOrEqualTo(2)); + } +} diff --git a/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/internal/CallTreeSpanifyTest.java b/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/internal/CallTreeSpanifyTest.java index 8eddda06f..057fab819 100644 --- a/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/internal/CallTreeSpanifyTest.java +++ b/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/internal/CallTreeSpanifyTest.java @@ -5,7 +5,6 @@ package io.opentelemetry.contrib.inferredspans.internal; -import static 
io.opentelemetry.contrib.inferredspans.internal.semconv.Attributes.CODE_STACKTRACE; import static io.opentelemetry.contrib.inferredspans.internal.semconv.Attributes.SPAN_IS_INFERRED; import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.assertThat; @@ -21,6 +20,7 @@ import io.opentelemetry.sdk.trace.SdkTracerProvider; import io.opentelemetry.sdk.trace.data.SpanData; import io.opentelemetry.sdk.trace.export.SimpleSpanProcessor; +import io.opentelemetry.semconv.CodeAttributes; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -64,26 +64,26 @@ void testSpanification() throws Exception { SpanData a = setup.getSpans().get(1); assertThat(a).hasName("CallTreeTest#a"); assertThat(a.getEndEpochNanos() - a.getStartEpochNanos()).isEqualTo(30_000_000); - assertThat(a.getAttributes().get(CODE_STACKTRACE)).isBlank(); + assertThat(a.getAttributes().get(CodeAttributes.CODE_STACKTRACE)).isBlank(); assertThat(a).hasAttribute(SPAN_IS_INFERRED, true); SpanData b = setup.getSpans().get(2); assertThat(b).hasName("CallTreeTest#b"); assertThat(b.getEndEpochNanos() - b.getStartEpochNanos()).isEqualTo(20_000_000); - assertThat(b.getAttributes().get(CODE_STACKTRACE)).isBlank(); + assertThat(b.getAttributes().get(CodeAttributes.CODE_STACKTRACE)).isBlank(); assertThat(b).hasAttribute(SPAN_IS_INFERRED, true); SpanData d = setup.getSpans().get(3); assertThat(d).hasName("CallTreeTest#d"); assertThat(d.getEndEpochNanos() - d.getStartEpochNanos()).isEqualTo(10_000_000); - assertThat(d.getAttributes().get(CODE_STACKTRACE)) + assertThat(d.getAttributes().get(CodeAttributes.CODE_STACKTRACE)) .isEqualTo("at " + CallTreeTest.class.getName() + ".c(CallTreeTest.java)"); assertThat(d).hasAttribute(SPAN_IS_INFERRED, true); SpanData e = setup.getSpans().get(4); assertThat(e).hasName("CallTreeTest#e"); assertThat(e.getEndEpochNanos() - e.getStartEpochNanos()).isEqualTo(10_000_000); - assertThat(e.getAttributes().get(CODE_STACKTRACE)).isBlank(); + 
assertThat(e.getAttributes().get(CodeAttributes.CODE_STACKTRACE)).isBlank(); assertThat(e).hasAttribute(SPAN_IS_INFERRED, true); } } diff --git a/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/internal/CallTreeTest.java b/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/internal/CallTreeTest.java index 8692158a6..cd824fbc0 100644 --- a/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/internal/CallTreeTest.java +++ b/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/internal/CallTreeTest.java @@ -5,7 +5,6 @@ package io.opentelemetry.contrib.inferredspans.internal; -import static io.opentelemetry.contrib.inferredspans.internal.semconv.Attributes.CODE_STACKTRACE; import static io.opentelemetry.contrib.inferredspans.internal.semconv.Attributes.LINK_IS_CHILD; import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.assertThat; import static java.util.stream.Collectors.toMap; @@ -20,6 +19,7 @@ import io.opentelemetry.contrib.inferredspans.internal.util.DisabledOnOpenJ9; import io.opentelemetry.sdk.trace.data.LinkData; import io.opentelemetry.sdk.trace.data.SpanData; +import io.opentelemetry.semconv.CodeAttributes; import java.io.IOException; import java.time.Duration; import java.util.ArrayList; @@ -924,7 +924,7 @@ private Map assertCallTree( .describedAs("Unexpected duration for span %s", span) .isEqualTo(durationMs * 1_000_000L); - String actualStacktrace = span.getAttributes().get(CODE_STACKTRACE); + String actualStacktrace = span.getAttributes().get(CodeAttributes.CODE_STACKTRACE); if (stackTrace == null || stackTrace.isEmpty()) { assertThat(actualStacktrace).isBlank(); } else { @@ -1052,7 +1052,7 @@ private static class StackTraceEvent { private final List trace; private final long nanoTime; - public StackTraceEvent(List trace, long nanoTime) { + StackTraceEvent(List trace, long nanoTime) { this.trace = trace; this.nanoTime = nanoTime; diff --git 
a/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/internal/SamplingProfilerQueueTest.java b/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/internal/SamplingProfilerQueueTest.java index 614f75e72..bcf56691b 100644 --- a/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/internal/SamplingProfilerQueueTest.java +++ b/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/internal/SamplingProfilerQueueTest.java @@ -17,7 +17,7 @@ import org.junit.jupiter.api.condition.DisabledOnOs; import org.junit.jupiter.api.condition.OS; -public class SamplingProfilerQueueTest { +class SamplingProfilerQueueTest { @Test @DisabledOnOs(OS.WINDOWS) diff --git a/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/internal/SamplingProfilerTest.java b/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/internal/SamplingProfilerTest.java index a355cb76f..b97ce8729 100644 --- a/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/internal/SamplingProfilerTest.java +++ b/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/internal/SamplingProfilerTest.java @@ -144,13 +144,13 @@ void testStartCommand() { setupProfiler(false); assertThat(setup.profiler.createStartCommand()) .isEqualTo( - "start,jfr,clock=m,event=wall,cstack=n,interval=5ms,filter,file=null,safemode=0"); + "start,jfr,clock=m,event=wall,nobatch,cstack=n,interval=5ms,filter,file=null,safemode=0"); setup.close(); setupProfiler(config -> config.startScheduledProfiling(false).profilerLoggingEnabled(false)); assertThat(setup.profiler.createStartCommand()) .isEqualTo( - "start,jfr,clock=m,event=wall,cstack=n,interval=5ms,filter,file=null,safemode=0,loglevel=none"); + "start,jfr,clock=m,event=wall,nobatch,cstack=n,interval=5ms,filter,file=null,safemode=0,loglevel=none"); setup.close(); setupProfiler( @@ -162,7 +162,7 @@ void testStartCommand() { .asyncProfilerSafeMode(14)); 
assertThat(setup.profiler.createStartCommand()) .isEqualTo( - "start,jfr,clock=m,event=wall,cstack=n,interval=10ms,filter,file=null,safemode=14,loglevel=none"); + "start,jfr,clock=m,event=wall,nobatch,cstack=n,interval=10ms,filter,file=null,safemode=14,loglevel=none"); } @Test diff --git a/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/internal/asyncprofiler/JfrParserTest.java b/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/internal/asyncprofiler/JfrParserTest.java index 0bf2d042e..60273534b 100644 --- a/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/internal/asyncprofiler/JfrParserTest.java +++ b/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/internal/asyncprofiler/JfrParserTest.java @@ -58,6 +58,6 @@ void name() throws Exception { } stackFrames.clear(); }); - assertThat(stackTraces.get()).isEqualTo(66); + assertThat(stackTraces.get()).isEqualTo(92); } } diff --git a/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/internal/semconv/AttributesTest.java b/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/internal/semconv/AttributesTest.java deleted file mode 100644 index cc39c4960..000000000 --- a/inferred-spans/src/test/java/io/opentelemetry/contrib/inferredspans/internal/semconv/AttributesTest.java +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright The OpenTelemetry Authors - * SPDX-License-Identifier: Apache-2.0 - */ - -package io.opentelemetry.contrib.inferredspans.internal.semconv; - -import static org.assertj.core.api.Assertions.assertThat; - -import io.opentelemetry.semconv.incubating.CodeIncubatingAttributes; -import org.junit.jupiter.api.Test; - -public class AttributesTest { - - @Test - public void checkCodeStacktraceUpToDate() { - assertThat(Attributes.CODE_STACKTRACE).isEqualTo(CodeIncubatingAttributes.CODE_STACKTRACE); - } -} diff --git a/inferred-spans/src/test/resources/logging.properties 
b/inferred-spans/src/test/resources/logging.properties new file mode 100644 index 000000000..f9ad0c6c2 --- /dev/null +++ b/inferred-spans/src/test/resources/logging.properties @@ -0,0 +1,4 @@ +handlers=java.util.logging.ConsoleHandler +.level=ALL +java.util.logging.ConsoleHandler.level=ALL +java.util.logging.ConsoleHandler.formatter=java.util.logging.SimpleFormatter diff --git a/inferred-spans/src/test/resources/recording.jfr b/inferred-spans/src/test/resources/recording.jfr index b44c32414..8f58d9350 100644 Binary files a/inferred-spans/src/test/resources/recording.jfr and b/inferred-spans/src/test/resources/recording.jfr differ diff --git a/jfr-connection/src/test/java/io/opentelemetry/contrib/jfr/connection/FlightRecorderDiagnosticCommandConnectionTest.java b/jfr-connection/src/test/java/io/opentelemetry/contrib/jfr/connection/FlightRecorderDiagnosticCommandConnectionTest.java index 8cf7f06e1..280c06909 100644 --- a/jfr-connection/src/test/java/io/opentelemetry/contrib/jfr/connection/FlightRecorderDiagnosticCommandConnectionTest.java +++ b/jfr-connection/src/test/java/io/opentelemetry/contrib/jfr/connection/FlightRecorderDiagnosticCommandConnectionTest.java @@ -5,9 +5,9 @@ package io.opentelemetry.contrib.jfr.connection; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.assertj.core.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; @@ -33,32 +33,32 @@ void assertCommercialFeaturesUnlocked() throws Exception { @Test void assertCommercialFeaturesLockedThrows() throws Exception { - assertThrows( - JfrConnectionException.class, - () -> { - ObjectName objectName = mock(ObjectName.class); 
- MBeanServerConnection mBeanServerConnection = mockMbeanServer(objectName, "locked"); - FlightRecorderDiagnosticCommandConnection.assertCommercialFeaturesUnlocked( - mBeanServerConnection, objectName); - }); + assertThatThrownBy( + () -> { + ObjectName objectName = mock(ObjectName.class); + MBeanServerConnection mBeanServerConnection = mockMbeanServer(objectName, "locked"); + FlightRecorderDiagnosticCommandConnection.assertCommercialFeaturesUnlocked( + mBeanServerConnection, objectName); + }) + .isInstanceOf(JfrConnectionException.class); } @Test void closeRecording() throws Exception { - assertThrows(UnsupportedOperationException.class, () -> createconnection().closeRecording(1)); + assertThatThrownBy(() -> createconnection().closeRecording(1)) + .isInstanceOf(UnsupportedOperationException.class); } @Test void testGetStream() throws Exception { - assertThrows( - UnsupportedOperationException.class, - () -> createconnection().getStream(1L, null, null, 0L)); + assertThatThrownBy(() -> createconnection().getStream(1L, null, null, 0L)) + .isInstanceOf(UnsupportedOperationException.class); } @Test void testCloneRecording() throws Exception { - assertThrows( - UnsupportedOperationException.class, () -> createconnection().cloneRecording(1, false)); + assertThatThrownBy(() -> createconnection().cloneRecording(1, false)) + .isInstanceOf(UnsupportedOperationException.class); } @Test @@ -73,7 +73,7 @@ void startRecordingParsesIdCorrectly() throws Exception { long id = connection.startRecording( new RecordingOptions.Builder().build(), RecordingConfiguration.PROFILE_CONFIGURATION); - assertEquals(id, 99); + assertThat(id).isEqualTo(99); } @Test diff --git a/jfr-connection/src/test/java/io/opentelemetry/contrib/jfr/connection/OpenDataUtilsTest.java b/jfr-connection/src/test/java/io/opentelemetry/contrib/jfr/connection/OpenDataUtilsTest.java index 9059980c3..041232b54 100644 --- a/jfr-connection/src/test/java/io/opentelemetry/contrib/jfr/connection/OpenDataUtilsTest.java +++ 
b/jfr-connection/src/test/java/io/opentelemetry/contrib/jfr/connection/OpenDataUtilsTest.java @@ -5,7 +5,7 @@ package io.opentelemetry.contrib.jfr.connection; -import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; import java.lang.management.ManagementFactory; import java.util.HashMap; @@ -49,6 +49,6 @@ void makeOpenData() throws Exception { mBeanServerConnection.invoke( objectInstance.getObjectName(), "getRecordingSettings", args, argTypes); - assertEquals(expected, actual); + assertThat(actual).isEqualTo(expected); } } diff --git a/jfr-connection/src/test/java/io/opentelemetry/contrib/jfr/connection/RecordingConfigurationTest.java b/jfr-connection/src/test/java/io/opentelemetry/contrib/jfr/connection/RecordingConfigurationTest.java index d34bb3f45..091a6ca5f 100644 --- a/jfr-connection/src/test/java/io/opentelemetry/contrib/jfr/connection/RecordingConfigurationTest.java +++ b/jfr-connection/src/test/java/io/opentelemetry/contrib/jfr/connection/RecordingConfigurationTest.java @@ -5,10 +5,9 @@ package io.opentelemetry.contrib.jfr.connection; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.assertj.core.api.Assertions.fail; import java.io.IOException; import java.nio.file.Files; @@ -44,18 +43,20 @@ void tearDown() { @Test void nullConfigThrows() { - assertThrows(IllegalArgumentException.class, () -> new JfcFileConfiguration(null)); + assertThatThrownBy(() -> new JfcFileConfiguration(null)) + .isInstanceOf(IllegalArgumentException.class); } @Test void brokenJfcConfigFileThrowsError() { - assertThrows(RuntimeMBeanException.class, () -> executeRecording("brokenJfcFile.jfc")); 
+ assertThatThrownBy(() -> executeRecording("brokenJfcFile.jfc")) + .isInstanceOf(RuntimeMBeanException.class); } @Test void jfcFileFromInputStreamCanBeRead() { IItemCollection recordingContent = executeRecording("sampleJfcFile.jfc"); - assertTrue(containsEvent(recordingContent, "jdk.ThreadAllocationStatistics")); + assertThat(containsEvent(recordingContent, "jdk.ThreadAllocationStatistics")).isTrue(); } @Test @@ -68,9 +69,9 @@ void mapConfiguration() { RecordingConfiguration recordingConfiguration = new MapConfiguration(recordingConfigAsMap); IItemCollection recordingContent = excecuteRecordingWithConfig(recordingConfiguration); - assertNotNull(recordingContent, "excecuteRecordingWithConfig returned null"); - assertTrue(containsEvent(recordingContent, "jdk.ObjectAllocationInNewTLAB")); - assertTrue(containsEvent(recordingContent, "jdk.ObjectAllocationOutsideTLAB")); + assertThat(recordingContent).isNotNull(); + assertThat(containsEvent(recordingContent, "jdk.ObjectAllocationInNewTLAB")).isTrue(); + assertThat(containsEvent(recordingContent, "jdk.ObjectAllocationOutsideTLAB")).isTrue(); } private static boolean containsEvent(IItemCollection recordingContent, String eventName) { @@ -110,7 +111,7 @@ private IItemCollection excecuteRecordingWithConfig(RecordingConfiguration confi } recording.stop(); recording.dump(dumpFile.toString()); - assertTrue(Files.exists(dumpFile)); + assertThat(dumpFile).exists(); try { return JfrLoaderToolkit.loadEvents(dumpFile.toFile()); diff --git a/jfr-connection/src/test/java/io/opentelemetry/contrib/jfr/connection/RecordingOptionsTest.java b/jfr-connection/src/test/java/io/opentelemetry/contrib/jfr/connection/RecordingOptionsTest.java index 9291fb7f2..2ca0135ba 100644 --- a/jfr-connection/src/test/java/io/opentelemetry/contrib/jfr/connection/RecordingOptionsTest.java +++ b/jfr-connection/src/test/java/io/opentelemetry/contrib/jfr/connection/RecordingOptionsTest.java @@ -5,8 +5,8 @@ package io.opentelemetry.contrib.jfr.connection; 
-import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import com.google.errorprone.annotations.Keep; import java.util.HashMap; @@ -33,14 +33,14 @@ private static Stream testGetName() { @MethodSource void testGetName(String testValue, String expected) { RecordingOptions opts = new RecordingOptions.Builder().name(testValue).build(); - assertEquals(expected, opts.getName()); + assertThat(opts.getName()).isEqualTo(expected); } @Test void testGetNameDefault() { String expected = ""; RecordingOptions opts = new RecordingOptions.Builder().build(); - assertEquals(expected, opts.getName()); + assertThat(opts.getName()).isEqualTo(expected); } @Keep @@ -64,14 +64,14 @@ static Stream testGetMaxAge() { @MethodSource void testGetMaxAge(String testValue, String expected) { RecordingOptions opts = new RecordingOptions.Builder().maxAge(testValue).build(); - assertEquals(expected, opts.getMaxAge()); + assertThat(opts.getMaxAge()).isEqualTo(expected); } @Test void testGetMaxAgeDefault() { String expected = "0"; RecordingOptions opts = new RecordingOptions.Builder().build(); - assertEquals(expected, opts.getMaxAge()); + assertThat(opts.getMaxAge()).isEqualTo(expected); } @Keep @@ -91,9 +91,8 @@ private static Stream testGetMaxAgeNegative() { @ParameterizedTest @MethodSource void testGetMaxAgeNegative(String badValue) { - assertThrows( - IllegalArgumentException.class, - () -> new RecordingOptions.Builder().maxAge(badValue).build()); + assertThatThrownBy(() -> new RecordingOptions.Builder().maxAge(badValue).build()) + .isInstanceOf(IllegalArgumentException.class); } @Keep @@ -113,14 +112,14 @@ private static Stream testGetMaxSize() { @MethodSource void testGetMaxSize(String testValue, String expected) { RecordingOptions opts = new RecordingOptions.Builder().maxSize(testValue).build(); - 
assertEquals(expected, opts.getMaxSize()); + assertThat(opts.getMaxSize()).isEqualTo(expected); } @Test void testGetMaxSizeDefault() { String expected = "0"; RecordingOptions opts = new RecordingOptions.Builder().build(); - assertEquals(expected, opts.getMaxSize()); + assertThat(opts.getMaxSize()).isEqualTo(expected); } @Keep @@ -135,30 +134,29 @@ private static Stream testGetMaxSizeNegative() { @ParameterizedTest @MethodSource void testGetMaxSizeNegative(String badValue) { - assertThrows( - IllegalArgumentException.class, - () -> new RecordingOptions.Builder().maxSize(badValue).build()); + assertThatThrownBy(() -> new RecordingOptions.Builder().maxSize(badValue).build()) + .isInstanceOf(IllegalArgumentException.class); } @Test void testGetDumpOnExit() { String expected = "true"; RecordingOptions opts = new RecordingOptions.Builder().dumpOnExit(expected).build(); - assertEquals(expected, opts.getDumpOnExit()); + assertThat(opts.getDumpOnExit()).isEqualTo(expected); } @Test void testGetDumpOnExitDefault() { String expected = "false"; RecordingOptions opts = new RecordingOptions.Builder().build(); - assertEquals(expected, opts.getDumpOnExit()); + assertThat(opts.getDumpOnExit()).isEqualTo(expected); } @Test void testGetDumpOnExitBadValue() { String expected = "false"; RecordingOptions opts = new RecordingOptions.Builder().dumpOnExit("BAD_VALUE").build(); - assertEquals(expected, opts.getDumpOnExit()); + assertThat(opts.getDumpOnExit()).isEqualTo(expected); } @Keep @@ -175,35 +173,35 @@ private static Stream testGetDestination() { @MethodSource void testGetDestination(String testValue, String expected) { RecordingOptions opts = new RecordingOptions.Builder().destination(testValue).build(); - assertEquals(expected, opts.getDestination()); + assertThat(opts.getDestination()).isEqualTo(expected); } @Test void testGetDestinationDefault() { String expected = ""; RecordingOptions opts = new RecordingOptions.Builder().build(); - assertEquals(expected, opts.getDestination()); 
+ assertThat(opts.getDestination()).isEqualTo(expected); } @Test void testGetDisk() { String expected = "true"; RecordingOptions opts = new RecordingOptions.Builder().disk(expected).build(); - assertEquals(expected, opts.getDisk()); + assertThat(opts.getDisk()).isEqualTo(expected); } @Test void testGetDiskDefault() { String expected = "false"; RecordingOptions opts = new RecordingOptions.Builder().build(); - assertEquals(expected, opts.getDisk()); + assertThat(opts.getDisk()).isEqualTo(expected); } @Test void testGetDiskBadValue() { String expected = "false"; RecordingOptions opts = new RecordingOptions.Builder().disk("BAD_VALUE").build(); - assertEquals(expected, opts.getDisk()); + assertThat(opts.getDisk()).isEqualTo(expected); } @Keep @@ -227,14 +225,14 @@ private static Stream testGetDuration() { @MethodSource void testGetDuration(String testValue, String expected) { RecordingOptions opts = new RecordingOptions.Builder().duration(testValue).build(); - assertEquals(expected, opts.getDuration()); + assertThat(opts.getDuration()).isEqualTo(expected); } @Test void testGetDurationDefault() { String expected = "0"; RecordingOptions opts = new RecordingOptions.Builder().build(); - assertEquals(expected, opts.getDuration()); + assertThat(opts.getDuration()).isEqualTo(expected); } @Keep @@ -254,9 +252,8 @@ private static Stream testGetDurationNegative() { @ParameterizedTest @MethodSource void testGetDurationNegative(String badValue) { - assertThrows( - IllegalArgumentException.class, - () -> new RecordingOptions.Builder().duration(badValue).build()); + assertThatThrownBy(() -> new RecordingOptions.Builder().duration(badValue).build()) + .isInstanceOf(IllegalArgumentException.class); } @Test @@ -279,7 +276,7 @@ void testGetRecordingOptions() { .disk("true") .duration("120 s") .build(); - assertEquals(expected, opts.getRecordingOptions()); + assertThat(opts.getRecordingOptions()).isEqualTo(expected); } @Test @@ -289,6 +286,6 @@ void testGetRecordingOptionsDefaults() { // 
to insure consistent behaviour. expected.put("disk", "false"); RecordingOptions opts = new RecordingOptions.Builder().build(); - assertEquals(expected, opts.getRecordingOptions()); + assertThat(opts.getRecordingOptions()).isEqualTo(expected); } } diff --git a/jfr-connection/src/test/java/io/opentelemetry/contrib/jfr/connection/RecordingTest.java b/jfr-connection/src/test/java/io/opentelemetry/contrib/jfr/connection/RecordingTest.java index ac5c781ee..715d25bcb 100644 --- a/jfr-connection/src/test/java/io/opentelemetry/contrib/jfr/connection/RecordingTest.java +++ b/jfr-connection/src/test/java/io/opentelemetry/contrib/jfr/connection/RecordingTest.java @@ -5,12 +5,8 @@ package io.opentelemetry.contrib.jfr.connection; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNotEquals; -import static org.junit.jupiter.api.Assertions.assertSame; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; import com.google.errorprone.annotations.Keep; import java.io.FileInputStream; @@ -109,8 +105,8 @@ void tearDown() { @Test void assertNewRecordingInitialValues() { try (Recording recording = flightRecorderConnection.newRecording(null, null)) { - assertEquals(Recording.State.NEW, recording.getState()); - assertEquals(-1, recording.getId()); + assertThat(recording.getState()).isEqualTo(Recording.State.NEW); + assertThat(recording.getId()).isEqualTo(-1); } catch (IOException | IllegalStateException | JfrConnectionException exception) { fail("assertNewRecordingInitialValues caught exception", exception); } @@ -120,8 +116,8 @@ void assertNewRecordingInitialValues() { void assertRecordingStartIdAndState() { try (Recording recording = flightRecorderConnection.newRecording(null, null)) { long id 
= recording.start(); - assertEquals(id, recording.getId()); - assertEquals(Recording.State.RECORDING, recording.getState()); + assertThat(recording.getId()).isEqualTo(id); + assertThat(recording.getState()).isEqualTo(Recording.State.RECORDING); } catch (IOException | IllegalStateException | JfrConnectionException e) { fail("assertRecordingStartIdAndState caught exception", e); } @@ -131,9 +127,9 @@ void assertRecordingStartIdAndState() { void assertRecordingStopState() { try (Recording recording = flightRecorderConnection.newRecording(null, null)) { long id = recording.start(); - assertEquals(id, recording.getId()); + assertThat(recording.getId()).isEqualTo(id); recording.stop(); - assertEquals(Recording.State.STOPPED, recording.getState()); + assertThat(recording.getState()).isEqualTo(Recording.State.STOPPED); } catch (IOException | IllegalStateException | JfrConnectionException e) { fail("assertRecordingStopState caught exception", e); } @@ -143,9 +139,9 @@ void assertRecordingStopState() { void assertRecordingCloseState() { try (Recording recording = flightRecorderConnection.newRecording(null, null)) { long id = recording.start(); - assertEquals(id, recording.getId()); + assertThat(recording.getId()).isEqualTo(id); recording.close(); - assertEquals(Recording.State.CLOSED, recording.getState()); + assertThat(recording.getState()).isEqualTo(Recording.State.CLOSED); } catch (IOException | IllegalStateException | JfrConnectionException e) { fail("assertRecordingCloseState caught exception", e); } @@ -255,7 +251,7 @@ void assertInvalidStateChangeThrowsIllegalStateException( try (Recording recording = flightRecorderConnection.newRecording(null, null)) { reflectivelyInvokeMethods(recording, args); } catch (InvocationTargetException invocationTargetException) { - assertTrue(invocationTargetException.getCause() instanceof IllegalStateException); + assertThat(invocationTargetException.getCause()).isInstanceOf(IllegalStateException.class); } catch (Exception e) { fail("Bad 
test code", e); } @@ -322,7 +318,7 @@ void assertRecordingOptionsAreSetInFlightRecorderMXBean( "getRecordingOptions", new Object[] {id}, new String[] {long.class.getName()}); - assertFalse(flightRecorderMXBeanOptions.isEmpty()); + assertThat(flightRecorderMXBeanOptions.isEmpty()).isFalse(); ((Collection) flightRecorderMXBeanOptions.values()) .forEach( compositeData -> { @@ -344,7 +340,7 @@ void assertRecordingOptionsAreSetInFlightRecorderMXBean( // and for destination since FlightRecorderMXBean returns null as default if (!("name".equals(key) && "".equals(actual)) && !("destination".equals(key) && "".equals(actual))) { - assertEquals(expected, actual, getter); + assertThat(actual).as(getter).isEqualTo(expected); } } catch (NoSuchMethodException | IllegalArgumentException @@ -393,7 +389,7 @@ void assertFileExistsAfterRecordingDump() { recording.stop(); Path dumpFile = Paths.get(System.getProperty("user.dir"), "testRecordingDump_dumped.jfr"); recording.dump(dumpFile.toString()); - assertTrue(Files.exists(dumpFile)); + assertThat(dumpFile).exists(); } catch (IllegalArgumentException badData) { fail("Issue in test data: " + badData.getMessage()); } catch (IOException ioe) { @@ -428,7 +424,7 @@ void assertFileExistsAfterRecordingStream() { fail(e.getMessage(), e); } - assertTrue(Files.exists(streamedFile)); + assertThat(streamedFile).exists(); } catch (IllegalArgumentException badData) { fail("Issue in test data: " + badData.getMessage()); @@ -502,9 +498,9 @@ void assertRecordingCloneState() { try (Recording recording = flightRecorderConnection.newRecording(recordingOptions, null)) { recording.start(); Recording clone = recording.clone(true); - assertSame(recording.getState(), Recording.State.RECORDING); - assertSame(clone.getState(), Recording.State.STOPPED); - assertNotEquals(recording.getId(), clone.getId()); + assertThat(recording.getState()).isEqualTo(Recording.State.RECORDING); + assertThat(clone.getState()).isEqualTo(Recording.State.STOPPED); + 
assertThat(recording.getId()).isNotEqualTo(clone.getId()); recording.stop(); } catch (IOException ioe) { // possible that this can be thrown, but should not happen in this context diff --git a/jfr-events/src/test/java/io/opentelemetry/contrib/jfrevent/JfrSpanProcessorTest.java b/jfr-events/src/test/java/io/opentelemetry/contrib/jfrevent/JfrSpanProcessorTest.java index 939a60af6..7225e72fa 100644 --- a/jfr-events/src/test/java/io/opentelemetry/contrib/jfrevent/JfrSpanProcessorTest.java +++ b/jfr-events/src/test/java/io/opentelemetry/contrib/jfrevent/JfrSpanProcessorTest.java @@ -5,7 +5,7 @@ package io.opentelemetry.contrib.jfrevent; -import static org.assertj.core.api.AssertionsForInterfaceTypes.assertThat; +import static org.assertj.core.api.Assertions.assertThat; import io.opentelemetry.api.trace.Span; import io.opentelemetry.api.trace.Tracer; @@ -52,7 +52,7 @@ void tearDown() { * @throws java.io.IOException on io error */ @Test - public void basicSpan() throws IOException { + void basicSpan() throws IOException { Path output = Files.createTempFile("test-basic-span", ".jfr"); try { @@ -89,7 +89,7 @@ public void basicSpan() throws IOException { * @throws java.lang.InterruptedException interrupted sleep */ @Test - public void basicSpanWithScope() throws IOException, InterruptedException { + void basicSpanWithScope() throws IOException, InterruptedException { Path output = Files.createTempFile("test-basic-span-with-scope", ".jfr"); try { diff --git a/jmx-metrics/README.md b/jmx-metrics/README.md index 359470774..a9cab0d47 100644 --- a/jmx-metrics/README.md +++ b/jmx-metrics/README.md @@ -1,5 +1,7 @@ # JMX Metric Gatherer +**Deprecation notice**: the JMX Metric Gatherer is deprecated and replaced by [JMX Scraper](../jmx-scraper/), see [migration instructions](../jmx-scraper/README.md#migration-from-jmx-gatherer). + This utility provides an easy framework for gathering and reporting metrics based on queried MBeans from a JMX server. 
It loads included and/or custom Groovy scripts and establishes a helpful, bound `otel` object with methods for obtaining MBeans and constructing OpenTelemetry instruments: diff --git a/jmx-metrics/build.gradle.kts b/jmx-metrics/build.gradle.kts index 9fd453d06..bc895cfc5 100644 --- a/jmx-metrics/build.gradle.kts +++ b/jmx-metrics/build.gradle.kts @@ -1,7 +1,7 @@ plugins { id("otel.java-conventions") application - id("com.github.johnrengelman.shadow") + id("com.gradleup.shadow") id("otel.groovy-conventions") id("otel.publish-conventions") @@ -12,12 +12,11 @@ otelJava.moduleName.set("io.opentelemetry.contrib.jmxmetrics") application.mainClass.set("io.opentelemetry.contrib.jmxmetrics.JmxMetrics") -val groovyVersion = "3.0.24" +val groovyVersion = "3.0.25" dependencies { api(platform("org.codehaus.groovy:groovy-bom:$groovyVersion")) - implementation("io.grpc:grpc-netty-shaded:1.71.0") implementation("org.codehaus.groovy:groovy-jmx") implementation("org.codehaus.groovy:groovy") implementation("io.prometheus:simpleclient") @@ -48,7 +47,7 @@ testing { dependencies { implementation("com.linecorp.armeria:armeria-grpc") implementation("com.linecorp.armeria:armeria-junit5") - implementation("io.opentelemetry.proto:opentelemetry-proto:1.5.0-alpha") + implementation("io.opentelemetry.proto:opentelemetry-proto:1.8.0-alpha") implementation("org.testcontainers:junit-jupiter") implementation("org.slf4j:slf4j-simple") } @@ -60,6 +59,8 @@ tasks { shadowJar { mergeServiceFiles() + duplicatesStrategy = DuplicatesStrategy.INCLUDE // required for mergeServiceFiles() + manifest { attributes["Implementation-Version"] = project.version } @@ -73,7 +74,9 @@ tasks { withType().configureEach { dependsOn(shadowJar) + inputs.files(layout.files(shadowJar)) systemProperty("shadow.jar.path", shadowJar.get().archiveFile.get().asFile.absolutePath) + systemProperty("gradle.project.version", "${project.version}") } diff --git a/jmx-metrics/docs/target-systems/cassandra.md 
b/jmx-metrics/docs/target-systems/cassandra.md index 50a491fab..2edcd9e64 100644 --- a/jmx-metrics/docs/target-systems/cassandra.md +++ b/jmx-metrics/docs/target-systems/cassandra.md @@ -1,7 +1,7 @@ # Casandra Metrics The JMX Metric Gatherer provides built in Cassandra metric gathering capabilities. -These metrics are sourced from Cassandra's exposed Dropwizard Metrics for each node: https://cassandra.apache.org/doc/latest/operating/metrics.html. +These metrics are sourced from Cassandra's exposed Dropwizard Metrics for each node: https://cassandra.apache.org/doc/latest/cassandra/managing/operating/metrics.html. ## Metrics diff --git a/jmx-metrics/docs/target-systems/jetty.md b/jmx-metrics/docs/target-systems/jetty.md index 46724daf3..eb74f4f51 100644 --- a/jmx-metrics/docs/target-systems/jetty.md +++ b/jmx-metrics/docs/target-systems/jetty.md @@ -1,7 +1,7 @@ # Jetty Metrics The JMX Metric Gatherer provides built in Jetty metric gathering capabilities. -Details about using JMX with WildFly can be found here: https://www.eclipse.org/jetty/documentation/jetty-11/operations-guide/index.html#og-jmx +Details about using JMX with Jetty can be found here: https://jetty.org/docs/jetty/11/operations-guide/jmx/index.html ### Metrics * Name: `jetty.select.count` diff --git a/jmx-metrics/src/integrationTest/java/io/opentelemetry/contrib/jmxmetrics/target_systems/KafkaIntegrationTest.java b/jmx-metrics/src/integrationTest/java/io/opentelemetry/contrib/jmxmetrics/target_systems/KafkaIntegrationTest.java index 4c2c9293c..4dddd975a 100644 --- a/jmx-metrics/src/integrationTest/java/io/opentelemetry/contrib/jmxmetrics/target_systems/KafkaIntegrationTest.java +++ b/jmx-metrics/src/integrationTest/java/io/opentelemetry/contrib/jmxmetrics/target_systems/KafkaIntegrationTest.java @@ -44,7 +44,7 @@ protected KafkaIntegrationTest(String configName) { @Container GenericContainer kafka = - new GenericContainer<>("bitnami/kafka:2.8.1") + new GenericContainer<>("bitnamilegacy/kafka:2.8.1") 
.withNetwork(Network.SHARED) .withEnv("KAFKA_CFG_ZOOKEEPER_CONNECT", "zookeeper:2181") .withEnv("ALLOW_PLAINTEXT_LISTENER", "yes") @@ -80,7 +80,7 @@ public Set getDependencies() { }; protected GenericContainer kafkaProducerContainer() { - return new GenericContainer<>("bitnami/kafka:2.8.1") + return new GenericContainer<>("bitnamilegacy/kafka:2.8.1") .withNetwork(Network.SHARED) .withEnv("KAFKA_CFG_ZOOKEEPER_CONNECT", "zookeeper:2181") .withEnv("ALLOW_PLAINTEXT_LISTENER", "yes") @@ -207,7 +207,7 @@ static class KafkaConsumerIntegrationTest extends KafkaIntegrationTest { @Container GenericContainer consumer = - new GenericContainer<>("bitnami/kafka:2.8.1") + new GenericContainer<>("bitnamilegacy/kafka:2.8.1") .withNetwork(Network.SHARED) .withEnv("KAFKA_CFG_ZOOKEEPER_CONNECT", "zookeeper:2181") .withEnv("ALLOW_PLAINTEXT_LISTENER", "yes") diff --git a/jmx-metrics/src/test/java/io/opentelemetry/contrib/jmxmetrics/MBeanHelperTest.java b/jmx-metrics/src/test/java/io/opentelemetry/contrib/jmxmetrics/MBeanHelperTest.java index 6a9886fe8..6fb3660c7 100644 --- a/jmx-metrics/src/test/java/io/opentelemetry/contrib/jmxmetrics/MBeanHelperTest.java +++ b/jmx-metrics/src/test/java/io/opentelemetry/contrib/jmxmetrics/MBeanHelperTest.java @@ -141,7 +141,7 @@ void transform() throws Exception { mBeanHelper.fetch(); assertThat(mBeanHelper.getAttribute("SomeAttribute")) - .hasSameElementsAs(Stream.of(new String[] {"otherValue"}).collect(Collectors.toList())); + .hasSameElementsAs(Stream.of("otherValue").collect(Collectors.toList())); } @Test @@ -169,10 +169,9 @@ void transformMultipleAttributes() throws Exception { mBeanHelper.fetch(); assertThat(mBeanHelper.getAttribute("SomeAttribute")) - .hasSameElementsAs(Stream.of(new String[] {"newValue"}).collect(Collectors.toList())); + .hasSameElementsAs(Stream.of("newValue").collect(Collectors.toList())); assertThat(mBeanHelper.getAttribute("AnotherAttribute")) - .hasSameElementsAs( - Stream.of(new String[] 
{"anotherNewValue"}).collect(Collectors.toList())); + .hasSameElementsAs(Stream.of("anotherNewValue").collect(Collectors.toList())); } @Test @@ -190,7 +189,7 @@ void customAttribute() throws Exception { mBeanHelper.fetch(); assertThat(mBeanHelper.getAttribute("CustomAttribute")) - .hasSameElementsAs(Stream.of(new String[] {"customValue"}).collect(Collectors.toList())); + .hasSameElementsAs(Stream.of("customValue").collect(Collectors.toList())); } private static void registerThings(String thingName) throws Exception { diff --git a/jmx-scraper/README.md b/jmx-scraper/README.md index 31fd8024d..a69736e15 100644 --- a/jmx-scraper/README.md +++ b/jmx-scraper/README.md @@ -4,13 +4,23 @@ This utility provides a way to query JMX metrics and export them to an OTLP endp The JMX MBeans and their metric mappings are defined in YAML and reuse implementation from [jmx-metrics instrumentation](https://github.com/open-telemetry/opentelemetry-java-instrumentation/tree/main/instrumentation/jmx-metrics). -This is currently a work-in-progress component not ready to be used in production. -The end goal is to provide an alternative to the [JMX Gatherer](../jmx-metrics/README.md) utility. +This is an alternative to the [JMX Gatherer](../jmx-metrics/README.md) utility. + +## Release + +This project is released as part of the [OpenTelemetry Java Contrib](https://github.com/open-telemetry/opentelemetry-java-contrib) project. +The latest release is available from: + +- [GitHub Release assets](https://github.com/open-telemetry/opentelemetry-java-contrib/releases/latest/download/opentelemetry-jmx-scraper.jar) +- [Maven Central](https://central.sonatype.com/artifact/io.opentelemetry.contrib/opentelemetry-jmx-scraper) ## Usage The general command to invoke JMX scraper is `java -jar scraper.jar `, where `scraper.jar` -is the `build/libs/opentelemetry-jmx-scraper-.jar` packaged binary when building this module. 
+is the packaged binary: + +- `build/libs/opentelemetry-jmx-scraper-.jar` when building from sources +- `.jar` file downloaded from Maven central or Release assets Minimal configuration required @@ -58,20 +68,20 @@ If there is a need to override existing ready-to-use metrics or to keep control Supported values for `otel.jmx.target.system` and support for `otel.jmx.target.source` and links to the metrics definitions: -| `otel.jmx.target.system` | description | `legacy` | `instrumentation` | -|--------------------------|-----------------------|-----------------------------------------------------------------|-------------------| -| `activemq` | Apache ActiveMQ | [`activemq.yaml`](src/main/resources/activemq.yaml) | | -| `cassandra` | Apache Cassandra | [`cassandra.yaml`](src/main/resources/cassandra.yaml) | | -| `hbase` | Apache HBase | [`hbase.yaml`](src/main/resources/hbase.yaml) | | -| `hadoop` | Apache Hadoop | [`hadoop.yaml`](src/main/resources/hadoop.yaml) | | -| `jetty` | Eclipse Jetty | [`jetty.yaml`](src/main/resources/jetty.yaml) | | -| `jvm` | JVM runtime metrics | [`jvm.yaml`](src/main/resources/jvm.yaml) | | -| `kafka` | Apache Kafka | [`kafka.yaml`](src/main/resources/kafka.yaml) | | -| `kafka-consumer` | Apache Kafka consumer | [`kafka-consumer.yaml`](src/main/resources/kafka-consumer.yaml) | | -| `kafka-producer` | Apache Kafka producer | [`kafka-producer.yaml`](src/main/resources/kafka-producer.yaml) | | -| `solr` | Apache Solr | [`solr.yaml`](src/main/resources/solr.yaml) | | -| `tomcat` | Apache Tomcat | [`tomcat.yaml`](src/main/resources/tomcat.yaml) | | -| `wildfly` | Wildfly | [`wildfly.yaml`](src/main/resources/wildfly.yaml) | | +| `otel.jmx.target.system` | description | `legacy` | `instrumentation` | 
+|--------------------------|-----------------------|-----------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `activemq` | Apache ActiveMQ | [`activemq.yaml`](src/main/resources/activemq.yaml) | | +| `cassandra` | Apache Cassandra | [`cassandra.yaml`](src/main/resources/cassandra.yaml) | | +| `hbase` | Apache HBase | [`hbase.yaml`](src/main/resources/hbase.yaml) | | +| `hadoop` | Apache Hadoop | [`hadoop.yaml`](src/main/resources/hadoop.yaml) | | +| `jetty` | Eclipse Jetty | [`jetty.yaml`](src/main/resources/jetty.yaml) | [`jetty.yaml`](https://github.com/open-telemetry/opentelemetry-java-instrumentation/blob/main/instrumentation/jmx-metrics/library/src/main/resources/jmx/rules/jetty.yaml) ([doc](https://github.com/open-telemetry/opentelemetry-java-instrumentation/blob/main/instrumentation/jmx-metrics/library/jetty.md)) | +| `jvm` | JVM runtime metrics | [`jvm.yaml`](src/main/resources/jvm.yaml) | [`jvm.yaml`](https://github.com/open-telemetry/opentelemetry-java-instrumentation/blob/main/instrumentation/jmx-metrics/library/src/main/resources/jmx/rules/jvm.yaml) ([doc](https://github.com/open-telemetry/opentelemetry-java-instrumentation/blob/main/instrumentation/jmx-metrics/library/jvm.md)) | +| `kafka` | Apache Kafka | [`kafka.yaml`](src/main/resources/kafka.yaml) | | +| `kafka-consumer` | Apache Kafka consumer | [`kafka-consumer.yaml`](src/main/resources/kafka-consumer.yaml) | | +| `kafka-producer` | Apache Kafka producer | [`kafka-producer.yaml`](src/main/resources/kafka-producer.yaml) | | +| `solr` | Apache Solr | [`solr.yaml`](src/main/resources/solr.yaml) | | +| `tomcat` | Apache Tomcat 
| [`tomcat.yaml`](src/main/resources/tomcat.yaml) | [`tomcat.yaml`](https://github.com/open-telemetry/opentelemetry-java-instrumentation/blob/main/instrumentation/jmx-metrics/library/src/main/resources/jmx/rules/tomcat.yaml) ([doc](https://github.com/open-telemetry/opentelemetry-java-instrumentation/blob/main/instrumentation/jmx-metrics/library/tomcat.md)) | +| `wildfly` | Wildfly | [`wildfly.yaml`](src/main/resources/wildfly.yaml) | [`wildfly.yaml`](https://github.com/open-telemetry/opentelemetry-java-instrumentation/blob/main/instrumentation/jmx-metrics/library/src/main/resources/jmx/rules/wildfly.yaml) ([doc](https://github.com/open-telemetry/opentelemetry-java-instrumentation/blob/main/instrumentation/jmx-metrics/library/wildfly.md)) | The source of metrics definitions is controlled by `otel.jmx.target.source`: @@ -85,12 +95,14 @@ Setting the value of `otel.jmx.target.source` allows to fit the following use-ca - `legacy` allows to keep using definitions that are very close to JMX Gatherer, this is the recommended option if preserving compatibility is required. Those definitions are in maintenance and are unlikely to evolve over time. - `instrumentation` forces using metrics definitions from instrumentation, hence only the reference. Metrics definitions and supported values of `otel.jmx.target.system` will be updated whenever the dependency on instrumentation is updated.
-The following SDK configuration options are also relevant +The following [SDK configuration options](https://opentelemetry.io/docs/languages/java/configuration/#environment-variables-and-system-properties) are also relevant -| config option | default value | description | -|-------------------------------|-----------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `otel.metric.export.interval` | `1m` (1 minute) | metric export interval, also controls the JMX sampling interval | -| `otel.metrics.exporter` | `otlp` | comma-separated list of metrics exporters supported values are `otlp` and `logging`, additional values might be provided through extra libraries in the classpath | +| config option | default value | description | +|-------------------------------|-----------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `otel.metric.export.interval` | `1m` (1 minute) | metric export interval, also controls the JMX sampling interval | +| `otel.metrics.exporter` | `otlp` | comma-separated list of metrics exporters supported values are `otlp` and `logging`, additional values might be provided through extra libraries in the classpath | +| `otel.service.name` | | service name | +| `otel.resource.attributes` | | used to specify otel resource attributes, including service attributes. 
See [the sdk configuration](https://opentelemetry.io/docs/languages/java/configuration/#properties-resource) and [service attributes](https://opentelemetry.io/docs/specs/semconv/registry/attributes/service/) | In addition to OpenTelemetry configuration, the following Java system properties can be provided through the command-line arguments, properties file or stdin and will be propagated to the JVM system properties: @@ -134,6 +146,45 @@ When doing so, the `java -jar` command can´t be used, we have to provide the cl java -cp scraper.jar:jboss-client.jar io.opentelemetry.contrib.jmxscraper.JmxScraper ``` +## Migration from JMX Gatherer + +The JMX Scraper aims to replace the [JMX Gatherer](../jmx-metrics) tool and thus share most features +and configuration with it. + +Features not supported: + +- Define and capture metrics with custom Groovy definitions with `otel.jmx.groovy.script`, this is now replaced with YAML and `otel.jmx.config` configuration option. +- Ability to export to prometheus collector, only the OTLP exporter is included. + +The YAML-based implementation is provided by [java instrumentation](https://github.com/open-telemetry/opentelemetry-java-instrumentation/tree/main/instrumentation/jmx-metrics) +and thus should be used for syntax details and documentation. + +Like with the JMX Gatherer, the selection of provided metrics to use is still done with `otel.jmx.target.system` configuration option. 
+ +However, there are now two distinct sets of metrics to select from using the `otel.jmx.target.source` configuration option: + +- `legacy`: [metrics definitions](./src/main/resources) equivalent to JMX Gatherer definitions to help transition and preserve compatibility +- `instrumentation`: [metrics definitions inherited from instrumentation](https://github.com/open-telemetry/opentelemetry-java-instrumentation/blob/main/instrumentation/jmx-metrics/library/src/main/resources/jmx/rules/), which is now used as a reference for JMX metrics; those also aim to provide better alignment with [metrics semantic conventions](https://opentelemetry.io/docs/specs/semconv/general/metrics/). + +In both cases, the metrics definitions themselves are embedded in the JMX Scraper binary, thus they +will only change if the release version of the JMX Scraper binary changes. + +By default, `otel.jmx.target.source` is `auto`, which means for each value of `otel.jmx.target.system`: + +- Metrics definitions from instrumentation will be used by default, if available. +- Legacy metrics definitions equivalent to JMX Gatherer will be used as fallback. +- Whenever new metrics definitions are being added or modified in instrumentation, those newer definitions will be used. + +There are multiple possible strategies depending on the ability or willingness to embrace change in metrics definitions: + +- To preserve maximum compatibility, using `legacy` is the recommended option, however it means not benefiting from future updates and contributions. +- To only get the most recent definitions, using `instrumentation` ensures that none of the legacy definitions is used, only the reference from instrumentation closer to semconv recommendations; those could still evolve over time. +- To embrace reference definitions whenever they become available, using `auto` is the recommended option, however it means the metrics produced could change when updating the version of JMX Scraper.
+- To handle more complex migration strategies or for tight control of metrics definitions, using copies of the YAML metrics definitions and providing them explicitly with `otel.jmx.config` is the recommended option. + +When using `otel.jmx.target.source` = `auto` or `legacy`, one or more legacy definitions might be used. If strict compatibility with metrics produced by JMX Gatherer is required, it is recommended to review +the [legacy metrics definitions YAML files](./src/main/resources/) as they contain comments on the minor differences with JMX Gatherer Groovy definitions. + ## Component owners - [Jason Plumb](https://github.com/breedx-splk), Splunk diff --git a/jmx-scraper/build.gradle.kts b/jmx-scraper/build.gradle.kts index 0d174717e..e2f4dc461 100644 --- a/jmx-scraper/build.gradle.kts +++ b/jmx-scraper/build.gradle.kts @@ -1,6 +1,6 @@ plugins { application - id("com.github.johnrengelman.shadow") + id("com.gradleup.shadow") id("otel.java-conventions") @@ -26,6 +26,8 @@ dependencies { implementation("io.opentelemetry.instrumentation:opentelemetry-jmx-metrics") + implementation("io.opentelemetry.semconv:opentelemetry-semconv-incubating") + testImplementation("org.junit-pioneer:junit-pioneer") testImplementation("io.opentelemetry:opentelemetry-sdk-testing") testImplementation("org.awaitility:awaitility") @@ -39,9 +41,9 @@ testing { implementation("org.slf4j:slf4j-simple") implementation("com.linecorp.armeria:armeria-junit5") implementation("com.linecorp.armeria:armeria-grpc") - implementation("io.opentelemetry.proto:opentelemetry-proto:1.5.0-alpha") - implementation("org.bouncycastle:bcprov-jdk18on:1.80") - implementation("org.bouncycastle:bcpkix-jdk18on:1.80") + implementation("io.opentelemetry.proto:opentelemetry-proto:1.8.0-alpha") + implementation("org.bouncycastle:bcprov-jdk18on:1.82") + implementation("org.bouncycastle:bcpkix-jdk18on:1.82") } } } @@ -51,6 +53,8 @@ tasks { shadowJar { mergeServiceFiles() + duplicatesStrategy =
DuplicatesStrategy.INCLUDE // required for mergeServiceFiles() + manifest { attributes["Implementation-Version"] = project.version } @@ -64,20 +68,23 @@ tasks { withType().configureEach { dependsOn(shadowJar) + inputs.files(layout.files(shadowJar)) systemProperty("shadow.jar.path", shadowJar.get().archiveFile.get().asFile.absolutePath) val testAppTask = project("test-app").tasks.named("jar") dependsOn(testAppTask) + inputs.files(layout.files(testAppTask)) systemProperty("app.jar.path", testAppTask.get().archiveFile.get().asFile.absolutePath) val testWarTask = project("test-webapp").tasks.named("war") dependsOn(testWarTask) + inputs.files(layout.files(testWarTask)) systemProperty("app.war.path", testWarTask.get().archiveFile.get().asFile.absolutePath) systemProperty("gradle.project.version", "${project.version}") develocity.testRetry { - // You can see tests that were retried by this mechanism in the collected test reports and build scans. + // TODO (trask) fix flaky tests and remove this workaround if (System.getenv().containsKey("CI")) { maxRetries.set(5) } diff --git a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/JmxConnectionTest.java b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/JmxConnectionTest.java index d1431647b..00d7fc124 100644 --- a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/JmxConnectionTest.java +++ b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/JmxConnectionTest.java @@ -9,6 +9,7 @@ import java.nio.file.Path; import java.security.cert.X509Certificate; +import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.function.Function; import org.junit.jupiter.api.AfterAll; @@ -28,7 +29,7 @@ * JmxConnectionBuilder and relies on containers to minimize the JMX/RMI network complications which * are not NAT-friendly. 
*/ -public class JmxConnectionTest { +class JmxConnectionTest { // OTLP endpoint is not used in test mode, but still has to be provided private static final String DUMMY_OTLP_ENDPOINT = "http://dummy-otlp-endpoint:8080/"; @@ -131,7 +132,7 @@ private void testServerSsl( } @ParameterizedTest - @EnumSource(value = JmxScraperContainer.ConfigSource.class) + @EnumSource void serverSslClientSsl(JmxScraperContainer.ConfigSource configSource) { // Note: this could have been made simpler by relying on the fact that keystore could be used // as a trust store, but having clear split provides also some extra clarity @@ -175,6 +176,42 @@ void serverSslClientSsl(JmxScraperContainer.ConfigSource configSource) { .withConfigSource(configSource)); } + @Test + void stableServiceInstanceServiceId() { + // start a single app, connect twice to it and check that the service id is the same + try (TestAppContainer app = appContainer().withJmxPort(JMX_PORT)) { + app.start(); + + UUID firstId = startScraperAndGetServiceId(); + UUID secondId = startScraperAndGetServiceId(); + + assertThat(firstId) + .describedAs( + "connecting twice to the same JVM should return the same service instance ID") + .isEqualTo(secondId); + } + } + + private static UUID startScraperAndGetServiceId() { + try (JmxScraperContainer scraper = + scraperContainer() + .withRmiServiceUrl(APP_HOST, JMX_PORT) + // does not need to be tested on all config sources + .withConfigSource(JmxScraperContainer.ConfigSource.SYSTEM_PROPERTIES)) { + scraper.start(); + waitTerminated(scraper); + String[] logLines = scraper.getLogs().split("\n"); + UUID serviceId = null; + for (String logLine : logLines) { + if (logLine.contains("remote service instance ID")) { + serviceId = UUID.fromString(logLine.substring(logLine.lastIndexOf(":") + 1).trim()); + } + } + assertThat(serviceId).describedAs("unable to get service instance ID from logs").isNotNull(); + return serviceId; + } + } + private static void connectionTest( Function customizeApp, 
Function customizeScraper) { diff --git a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/JmxScraperContainer.java b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/JmxScraperContainer.java index 8d06c0d7a..d1148fc89 100644 --- a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/JmxScraperContainer.java +++ b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/JmxScraperContainer.java @@ -237,8 +237,8 @@ public void start() { } else { Path script = generateShellScript(cmd, config); - this.withCopyFileToContainer(MountableFile.forHostPath(script, 500), "/scraper.sh"); - this.withCommand("/scraper.sh"); + this.withCopyFileToContainer(MountableFile.forHostPath(script), "/scraper.sh"); + this.withCommand("bash", "/scraper.sh"); } logger().info("Starting scraper with command: " + String.join(" ", this.getCommandParts())); @@ -263,7 +263,7 @@ private Path generateShellScript(List cmd, Map config) { logger().info("Scraper executed with /scraper.sh shell script"); for (int i = 0; i < lines.size(); i++) { - logger().info("/scrapper.sh:{} {}", i, lines.get(i)); + logger().info("/scraper.sh:{} {}", i, lines.get(i)); } return script; } diff --git a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/ActiveMqIntegrationTest.java b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/ActiveMqIntegrationTest.java index 4f909fdc5..19d98bf38 100644 --- a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/ActiveMqIntegrationTest.java +++ b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/ActiveMqIntegrationTest.java @@ -15,7 +15,7 @@ import org.testcontainers.containers.wait.strategy.Wait; import org.testcontainers.images.builder.ImageFromDockerfile; -public class ActiveMqIntegrationTest extends TargetSystemIntegrationTest { +class 
ActiveMqIntegrationTest extends TargetSystemIntegrationTest { private static final int ACTIVEMQ_PORT = 61616; diff --git a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/CassandraIntegrationTest.java b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/CassandraIntegrationTest.java index 90f1fca73..5ff8f2dcc 100644 --- a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/CassandraIntegrationTest.java +++ b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/CassandraIntegrationTest.java @@ -15,7 +15,7 @@ import org.testcontainers.containers.GenericContainer; import org.testcontainers.containers.wait.strategy.Wait; -public class CassandraIntegrationTest extends TargetSystemIntegrationTest { +class CassandraIntegrationTest extends TargetSystemIntegrationTest { private static final int CASSANDRA_PORT = 9042; diff --git a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/CustomIntegrationTest.java b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/CustomIntegrationTest.java index dcecc8f5f..d238e8a72 100644 --- a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/CustomIntegrationTest.java +++ b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/CustomIntegrationTest.java @@ -11,7 +11,7 @@ import org.testcontainers.containers.GenericContainer; import org.testcontainers.containers.wait.strategy.Wait; -public class CustomIntegrationTest extends TargetSystemIntegrationTest { +class CustomIntegrationTest extends TargetSystemIntegrationTest { @Override protected GenericContainer createTargetContainer(int jmxPort) { diff --git a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/HBaseIntegrationTest.java 
b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/HBaseIntegrationTest.java index 803137539..4bab7e574 100644 --- a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/HBaseIntegrationTest.java +++ b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/HBaseIntegrationTest.java @@ -15,7 +15,7 @@ import org.testcontainers.containers.GenericContainer; import org.testcontainers.containers.wait.strategy.Wait; -public class HBaseIntegrationTest extends TargetSystemIntegrationTest { +class HBaseIntegrationTest extends TargetSystemIntegrationTest { @Override protected GenericContainer createTargetContainer(int jmxPort) { return new GenericContainer<>("dajobe/hbase") diff --git a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/HadoopIntegrationTest.java b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/HadoopIntegrationTest.java index b89225629..bc44ea36c 100644 --- a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/HadoopIntegrationTest.java +++ b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/HadoopIntegrationTest.java @@ -16,7 +16,7 @@ import org.testcontainers.containers.wait.strategy.Wait; import org.testcontainers.utility.MountableFile; -public class HadoopIntegrationTest extends TargetSystemIntegrationTest { +class HadoopIntegrationTest extends TargetSystemIntegrationTest { private static final int HADOOP_PORT = 50070; diff --git a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/JettyIntegrationTest.java b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/JettyIntegrationTest.java index 86097cbaa..8c91e0c85 100644 --- 
a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/JettyIntegrationTest.java +++ b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/JettyIntegrationTest.java @@ -16,7 +16,7 @@ import org.testcontainers.containers.wait.strategy.Wait; import org.testcontainers.images.builder.ImageFromDockerfile; -public class JettyIntegrationTest extends TargetSystemIntegrationTest { +class JettyIntegrationTest extends TargetSystemIntegrationTest { private static final int JETTY_PORT = 8080; diff --git a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/JvmIntegrationTest.java b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/JvmIntegrationTest.java index 6c4069638..be8a04a53 100644 --- a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/JvmIntegrationTest.java +++ b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/JvmIntegrationTest.java @@ -15,7 +15,7 @@ import org.testcontainers.containers.GenericContainer; import org.testcontainers.containers.wait.strategy.Wait; -public class JvmIntegrationTest extends TargetSystemIntegrationTest { +class JvmIntegrationTest extends TargetSystemIntegrationTest { @Override protected GenericContainer createTargetContainer(int jmxPort) { @@ -32,12 +32,6 @@ protected JmxScraperContainer customizeScraperContainer( JmxScraperContainer scraper, GenericContainer target, Path tempDir) { return scraper .withTargetSystem("jvm") - // TODO when JVM metrics will be added to instrumentation, the default "auto" source - // means that the definitions in instrumentation will be used, and thus this test will fail - // due to metrics differences, adding an explicit "legacy" source is required to continue - // testing the JVM metrics defined in this project. 
- // https://github.com/open-telemetry/opentelemetry-java-instrumentation/pull/13392 - // .withTargetSystem("legacy") // also testing custom yaml .withCustomYaml("custom-metrics.yaml"); } diff --git a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/SolrIntegrationTest.java b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/SolrIntegrationTest.java index fbec81e2e..9cba55701 100644 --- a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/SolrIntegrationTest.java +++ b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/SolrIntegrationTest.java @@ -18,7 +18,7 @@ import org.testcontainers.containers.Network; import org.testcontainers.containers.wait.strategy.Wait; -public class SolrIntegrationTest extends TargetSystemIntegrationTest { +class SolrIntegrationTest extends TargetSystemIntegrationTest { @Override protected GenericContainer createTargetContainer(int jmxPort) { diff --git a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/TargetSystemIntegrationTest.java b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/TargetSystemIntegrationTest.java index 9c58c07fc..e74e93180 100644 --- a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/TargetSystemIntegrationTest.java +++ b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/TargetSystemIntegrationTest.java @@ -143,6 +143,12 @@ protected void startContainers(Path tmpDir) { // Create and initialize scraper container scraper = new JmxScraperContainer(otlpEndpoint, scraperBaseImage()) + // Since JVM metrics were be added to instrumentation, the default "auto" source + // means that the definitions in instrumentation will be used, and thus tests will fail + // due to metrics differences, adding an explicit 
"legacy" source is required to + // continue + // testing metrics defined in this project. + .withTargetSystemSource("legacy") .withLogConsumer(new Slf4jLogConsumer(jmxScraperLogger)) .withNetwork(network) .withRmiServiceUrl(TARGET_SYSTEM_NETWORK_ALIAS, JMX_PORT); diff --git a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/TomcatIntegrationTest.java b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/TomcatIntegrationTest.java index bec9453f8..b01f4485f 100644 --- a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/TomcatIntegrationTest.java +++ b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/TomcatIntegrationTest.java @@ -16,7 +16,7 @@ import org.testcontainers.containers.wait.strategy.Wait; import org.testcontainers.images.builder.ImageFromDockerfile; -public class TomcatIntegrationTest extends TargetSystemIntegrationTest { +class TomcatIntegrationTest extends TargetSystemIntegrationTest { private static final int TOMCAT_PORT = 8080; diff --git a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/WildflyIntegrationTest.java b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/WildflyIntegrationTest.java index 73e546b85..bb2787b0a 100644 --- a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/WildflyIntegrationTest.java +++ b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/WildflyIntegrationTest.java @@ -22,7 +22,7 @@ import org.testcontainers.images.builder.ImageFromDockerfile; import org.testcontainers.utility.MountableFile; -public class WildflyIntegrationTest extends TargetSystemIntegrationTest { +class WildflyIntegrationTest extends TargetSystemIntegrationTest { private static final int WILDFLY_SERVICE_PORT = 8080; private static final int 
WILDFLY_MANAGEMENT_PORT = 9990; diff --git a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/kafka/KafkaConsumerIntegrationTest.java b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/kafka/KafkaConsumerIntegrationTest.java index ed61aabbb..9319baf65 100644 --- a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/kafka/KafkaConsumerIntegrationTest.java +++ b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/kafka/KafkaConsumerIntegrationTest.java @@ -25,7 +25,7 @@ import org.testcontainers.containers.output.Slf4jLogConsumer; import org.testcontainers.containers.wait.strategy.Wait; -public class KafkaConsumerIntegrationTest extends TargetSystemIntegrationTest { +class KafkaConsumerIntegrationTest extends TargetSystemIntegrationTest { @Override protected Collection> createPrerequisiteContainers() { diff --git a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/kafka/KafkaContainerFactory.java b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/kafka/KafkaContainerFactory.java index 8eb9432a5..e46ed07b6 100644 --- a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/kafka/KafkaContainerFactory.java +++ b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/kafka/KafkaContainerFactory.java @@ -12,7 +12,7 @@ public class KafkaContainerFactory { private static final int KAFKA_PORT = 9092; private static final String KAFKA_BROKER = "kafka:" + KAFKA_PORT; - private static final String KAFKA_DOCKER_IMAGE = "bitnami/kafka:2.8.1"; + private static final String KAFKA_DOCKER_IMAGE = "bitnamilegacy/kafka:2.8.1"; private KafkaContainerFactory() {} diff --git a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/kafka/KafkaIntegrationTest.java 
b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/kafka/KafkaIntegrationTest.java index f59040509..17d38d995 100644 --- a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/kafka/KafkaIntegrationTest.java +++ b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/kafka/KafkaIntegrationTest.java @@ -21,7 +21,7 @@ import org.testcontainers.containers.GenericContainer; import org.testcontainers.containers.output.Slf4jLogConsumer; -public class KafkaIntegrationTest extends TargetSystemIntegrationTest { +class KafkaIntegrationTest extends TargetSystemIntegrationTest { @Override protected Collection> createPrerequisiteContainers() { GenericContainer zookeeper = diff --git a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/kafka/KafkaProducerIntegrationTest.java b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/kafka/KafkaProducerIntegrationTest.java index 155cb9fc5..37b141b01 100644 --- a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/kafka/KafkaProducerIntegrationTest.java +++ b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/kafka/KafkaProducerIntegrationTest.java @@ -24,7 +24,7 @@ import org.testcontainers.containers.output.Slf4jLogConsumer; import org.testcontainers.containers.wait.strategy.Wait; -public class KafkaProducerIntegrationTest extends TargetSystemIntegrationTest { +class KafkaProducerIntegrationTest extends TargetSystemIntegrationTest { @Override protected Collection> createPrerequisiteContainers() { diff --git a/jmx-scraper/src/main/java/io/opentelemetry/contrib/jmxscraper/InvalidArgumentException.java b/jmx-scraper/src/main/java/io/opentelemetry/contrib/jmxscraper/InvalidArgumentException.java index bdfb93272..d3358741f 100644 --- 
a/jmx-scraper/src/main/java/io/opentelemetry/contrib/jmxscraper/InvalidArgumentException.java +++ b/jmx-scraper/src/main/java/io/opentelemetry/contrib/jmxscraper/InvalidArgumentException.java @@ -9,7 +9,7 @@ * Exception indicating something is wrong with the provided arguments or reading the configuration * from them */ -public class InvalidArgumentException extends Exception { +public final class InvalidArgumentException extends Exception { private static final long serialVersionUID = 0L; diff --git a/jmx-scraper/src/main/java/io/opentelemetry/contrib/jmxscraper/JmxConnectorBuilder.java b/jmx-scraper/src/main/java/io/opentelemetry/contrib/jmxscraper/JmxConnectorBuilder.java index ceba4b2fe..92ff4f387 100644 --- a/jmx-scraper/src/main/java/io/opentelemetry/contrib/jmxscraper/JmxConnectorBuilder.java +++ b/jmx-scraper/src/main/java/io/opentelemetry/contrib/jmxscraper/JmxConnectorBuilder.java @@ -5,6 +5,8 @@ package io.opentelemetry.contrib.jmxscraper; +import static java.util.logging.Level.WARNING; + import com.google.errorprone.annotations.CanIgnoreReturnValue; import java.io.IOException; import java.net.MalformedURLException; @@ -17,7 +19,6 @@ import java.util.HashMap; import java.util.Locale; import java.util.Map; -import java.util.logging.Level; import java.util.logging.Logger; import javax.annotation.Nullable; import javax.management.remote.JMXConnector; @@ -33,7 +34,7 @@ import javax.security.auth.callback.UnsupportedCallbackException; import javax.security.sasl.RealmCallback; -public class JmxConnectorBuilder { +public final class JmxConnectorBuilder { private static final Logger logger = Logger.getLogger(JmxConnectorBuilder.class.getName()); @@ -146,7 +147,7 @@ private Map buildEnv() { } }); } catch (ReflectiveOperationException e) { - logger.log(Level.WARNING, "SASL unsupported in current environment: " + e.getMessage()); + logger.log(WARNING, "SASL unsupported in current environment: " + e.getMessage()); } return env; } diff --git 
a/jmx-scraper/src/main/java/io/opentelemetry/contrib/jmxscraper/JmxScraper.java b/jmx-scraper/src/main/java/io/opentelemetry/contrib/jmxscraper/JmxScraper.java index 8bf9f46ce..678368d7b 100644 --- a/jmx-scraper/src/main/java/io/opentelemetry/contrib/jmxscraper/JmxScraper.java +++ b/jmx-scraper/src/main/java/io/opentelemetry/contrib/jmxscraper/JmxScraper.java @@ -5,7 +5,14 @@ package io.opentelemetry.contrib.jmxscraper; -import io.opentelemetry.api.GlobalOpenTelemetry; +import static io.opentelemetry.semconv.incubating.ServiceIncubatingAttributes.SERVICE_INSTANCE_ID; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static java.util.logging.Level.INFO; +import static java.util.logging.Level.SEVERE; + +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.Attributes; import io.opentelemetry.contrib.jmxscraper.config.JmxScraperConfig; import io.opentelemetry.contrib.jmxscraper.config.PropertiesCustomizer; import io.opentelemetry.contrib.jmxscraper.config.PropertiesSupplier; @@ -13,27 +20,32 @@ import io.opentelemetry.instrumentation.jmx.engine.MetricConfiguration; import io.opentelemetry.instrumentation.jmx.yaml.RuleParser; import io.opentelemetry.sdk.autoconfigure.AutoConfiguredOpenTelemetrySdk; +import io.opentelemetry.sdk.autoconfigure.spi.ConfigProperties; import io.opentelemetry.sdk.autoconfigure.spi.ConfigurationException; +import io.opentelemetry.sdk.resources.Resource; import java.io.DataInputStream; import java.io.IOException; import java.io.InputStream; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.Optional; import java.util.Properties; +import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.logging.Level; +import 
java.util.function.BiFunction; import java.util.logging.Logger; +import javax.annotation.Nullable; import javax.management.MBeanServerConnection; +import javax.management.ObjectName; import javax.management.remote.JMXConnector; -public class JmxScraper { +public final class JmxScraper { + private static final Logger logger = Logger.getLogger(JmxScraper.class.getName()); private static final String CONFIG_ARG = "-config"; private static final String TEST_ARG = "-test"; @@ -55,82 +67,110 @@ public static void main(String[] args) { // set log format System.setProperty("java.util.logging.SimpleFormatter.format", "%1$tF %1$tT %4$s %5$s%n"); - List effectiveArgs = new ArrayList<>(Arrays.asList(args)); + List effectiveArgs = new ArrayList<>(asList(args)); boolean testMode = effectiveArgs.remove(TEST_ARG); try { Properties argsConfig = argsToConfig(effectiveArgs); propagateToSystemProperties(argsConfig); - // auto-configure and register SDK PropertiesCustomizer configCustomizer = new PropertiesCustomizer(); - AutoConfiguredOpenTelemetrySdk.builder() - .addPropertiesSupplier(new PropertiesSupplier(argsConfig)) - .addPropertiesCustomizer(configCustomizer) - .setResultAsGlobal() - .build(); + // we rely on the config customizer to be executed first to get effective config. 
+ BiFunction resourceCustomizer = + (resource, configProperties) -> { + UUID instanceId = getRemoteServiceInstanceId(configCustomizer.getConnectorBuilder()); + if (resource.getAttribute(SERVICE_INSTANCE_ID) != null || instanceId == null) { + return resource; + } + logger.log(INFO, "remote service instance ID: " + instanceId); + return resource.merge( + Resource.create(Attributes.of(SERVICE_INSTANCE_ID, instanceId.toString()))); + }; + + // auto-configure SDK + OpenTelemetry openTelemetry = + AutoConfiguredOpenTelemetrySdk.builder() + .addPropertiesSupplier(new PropertiesSupplier(argsConfig)) + .addPropertiesCustomizer(configCustomizer) + .addResourceCustomizer(resourceCustomizer) + .build() + .getOpenTelemetrySdk(); + + // scraper configuration and connector builder are built using effective SDK configuration + // thus we have to get it after the SDK is built JmxScraperConfig scraperConfig = configCustomizer.getScraperConfig(); - - long exportSeconds = scraperConfig.getSamplingInterval().toMillis() / 1000; - logger.log(Level.INFO, "metrics export interval (seconds) = " + exportSeconds); - - JmxMetricInsight service = - JmxMetricInsight.createService( - GlobalOpenTelemetry.get(), scraperConfig.getSamplingInterval().toMillis()); - JmxConnectorBuilder connectorBuilder = - JmxConnectorBuilder.createNew(scraperConfig.getServiceUrl()); - - Optional.ofNullable(scraperConfig.getUsername()).ifPresent(connectorBuilder::withUser); - Optional.ofNullable(scraperConfig.getPassword()).ifPresent(connectorBuilder::withPassword); - - if (scraperConfig.isRegistrySsl()) { - connectorBuilder.withSslRegistry(); - } + JmxConnectorBuilder connectorBuilder = configCustomizer.getConnectorBuilder(); if (testMode) { System.exit(testConnection(connectorBuilder) ? 
0 : 1); } else { - JmxScraper jmxScraper = new JmxScraper(connectorBuilder, service, scraperConfig); + JmxMetricInsight jmxInsight = + JmxMetricInsight.createService( + openTelemetry, scraperConfig.getSamplingInterval().toMillis()); + JmxScraper jmxScraper = new JmxScraper(connectorBuilder, jmxInsight, scraperConfig); jmxScraper.start(); } } catch (ConfigurationException e) { - logger.log(Level.SEVERE, "invalid configuration ", e); + logger.log(SEVERE, "invalid configuration: " + e.getMessage(), e); System.exit(1); } catch (InvalidArgumentException e) { - logger.log(Level.SEVERE, e.getMessage(), e); + logger.log(SEVERE, e.getMessage(), e); logger.info("Usage: java -jar [-test] [-config ]"); logger.info(" -test test JMX connection with provided configuration and exit"); logger.info( " -config provide configuration, where is - for stdin, or "); System.exit(1); } catch (IOException e) { - logger.log(Level.SEVERE, "Unable to connect ", e); + logger.log(SEVERE, "Unable to connect ", e); System.exit(2); } catch (RuntimeException e) { - logger.log(Level.SEVERE, e.getMessage(), e); + logger.log(SEVERE, e.getMessage(), e); System.exit(3); } } private static boolean testConnection(JmxConnectorBuilder connectorBuilder) { try (JMXConnector connector = connectorBuilder.build()) { - MBeanServerConnection connection = connector.getMBeanServerConnection(); Integer mbeanCount = connection.getMBeanCount(); if (mbeanCount > 0) { - logger.log(Level.INFO, "JMX connection test OK"); + logger.log(INFO, "JMX connection test OK"); return true; } else { - logger.log(Level.SEVERE, "JMX connection test ERROR"); + logger.log(SEVERE, "JMX connection test ERROR"); return false; } } catch (IOException e) { - logger.log(Level.SEVERE, "JMX connection test ERROR", e); + logger.log(SEVERE, "JMX connection test ERROR", e); return false; } } + @Nullable + private static UUID getRemoteServiceInstanceId(JmxConnectorBuilder connectorBuilder) { + try (JMXConnector jmxConnector = connectorBuilder.build()) { 
+ MBeanServerConnection connection = jmxConnector.getMBeanServerConnection(); + + StringBuilder id = new StringBuilder(); + try { + ObjectName objectName = new ObjectName("java.lang:type=Runtime"); + for (String attribute : Arrays.asList("StartTime", "Name")) { + Object value = connection.getAttribute(objectName, attribute); + if (id.length() > 0) { + id.append(" "); + } + id.append(value); + } + return UUID.nameUUIDFromBytes(id.toString().getBytes(StandardCharsets.UTF_8)); + } catch (Exception e) { + throw new IllegalStateException(e); + } + } catch (IOException e) { + return null; + } + } + // package private for testing static void propagateToSystemProperties(Properties properties) { for (Map.Entry entry : properties.entrySet()) { @@ -210,7 +250,7 @@ private void start() throws IOException { try (JMXConnector connector = client.build()) { MBeanServerConnection connection = connector.getMBeanServerConnection(); - service.startRemote(getMetricConfig(config), () -> Collections.singletonList(connection)); + service.startRemote(getMetricConfig(config), () -> singletonList(connection)); running.set(true); logger.info("JMX scraping started"); diff --git a/jmx-scraper/src/main/java/io/opentelemetry/contrib/jmxscraper/config/JmxScraperConfig.java b/jmx-scraper/src/main/java/io/opentelemetry/contrib/jmxscraper/config/JmxScraperConfig.java index 09a97eb83..53c19096b 100644 --- a/jmx-scraper/src/main/java/io/opentelemetry/contrib/jmxscraper/config/JmxScraperConfig.java +++ b/jmx-scraper/src/main/java/io/opentelemetry/contrib/jmxscraper/config/JmxScraperConfig.java @@ -5,21 +5,25 @@ package io.opentelemetry.contrib.jmxscraper.config; +import static java.util.Collections.emptyList; +import static java.util.Collections.emptySet; +import static java.util.Collections.unmodifiableList; +import static java.util.Collections.unmodifiableSet; +import static java.util.Locale.ROOT; + import io.opentelemetry.sdk.autoconfigure.spi.ConfigProperties; import 
io.opentelemetry.sdk.autoconfigure.spi.ConfigurationException; import java.io.InputStream; import java.time.Duration; import java.util.ArrayList; -import java.util.Collections; import java.util.HashSet; import java.util.List; -import java.util.Locale; import java.util.Set; import java.util.logging.Logger; import javax.annotation.Nullable; /** This class keeps application settings */ -public class JmxScraperConfig { +public final class JmxScraperConfig { private static final Logger logger = Logger.getLogger(JmxScraperConfig.class.getName()); @@ -49,9 +53,9 @@ public class JmxScraperConfig { private String serviceUrl = ""; - private List jmxConfig = Collections.emptyList(); + private List jmxConfig = emptyList(); - private Set targetSystems = Collections.emptySet(); + private Set targetSystems = emptySet(); private TargetSystemSource targetSystemSource = TargetSystemSource.AUTO; @@ -73,7 +77,7 @@ public enum TargetSystemSource { static TargetSystemSource fromString(String source) { try { - return TargetSystemSource.valueOf(source.toUpperCase(Locale.ROOT)); + return TargetSystemSource.valueOf(source.toUpperCase(ROOT)); } catch (IllegalArgumentException e) { throw new IllegalArgumentException("Invalid target system source: " + source, e); } @@ -217,8 +221,8 @@ public static JmxScraperConfig fromConfig(ConfigProperties config) { "at least one of '" + JMX_TARGET_SYSTEM + "' or '" + JMX_CONFIG + "' must be set"); } - scraperConfig.jmxConfig = Collections.unmodifiableList(jmxConfig); - scraperConfig.targetSystems = Collections.unmodifiableSet(new HashSet<>(targetSystem)); + scraperConfig.jmxConfig = unmodifiableList(jmxConfig); + scraperConfig.targetSystems = unmodifiableSet(new HashSet<>(targetSystem)); scraperConfig.username = config.getString("otel.jmx.username"); scraperConfig.password = config.getString("otel.jmx.password"); diff --git a/jmx-scraper/src/main/java/io/opentelemetry/contrib/jmxscraper/config/PropertiesCustomizer.java 
b/jmx-scraper/src/main/java/io/opentelemetry/contrib/jmxscraper/config/PropertiesCustomizer.java index 9d6812146..dc6f06952 100644 --- a/jmx-scraper/src/main/java/io/opentelemetry/contrib/jmxscraper/config/PropertiesCustomizer.java +++ b/jmx-scraper/src/main/java/io/opentelemetry/contrib/jmxscraper/config/PropertiesCustomizer.java @@ -8,15 +8,18 @@ import static io.opentelemetry.contrib.jmxscraper.config.JmxScraperConfig.JMX_INTERVAL_LEGACY; import static io.opentelemetry.contrib.jmxscraper.config.JmxScraperConfig.METRIC_EXPORT_INTERVAL; +import io.opentelemetry.contrib.jmxscraper.JmxConnectorBuilder; import io.opentelemetry.sdk.autoconfigure.spi.ConfigProperties; import java.util.HashMap; import java.util.Map; +import java.util.Optional; import java.util.function.Function; +import java.util.logging.Level; import java.util.logging.Logger; import javax.annotation.Nullable; /** Customizer of default SDK configuration and provider of effective scraper config */ -public class PropertiesCustomizer implements Function> { +public final class PropertiesCustomizer implements Function> { private static final Logger logger = Logger.getLogger(PropertiesCustomizer.class.getName()); @@ -24,6 +27,8 @@ public class PropertiesCustomizer implements Function apply(ConfigProperties config) { Map result = new HashMap<>(); @@ -44,10 +49,28 @@ public Map apply(ConfigProperties config) { result.put(METRIC_EXPORT_INTERVAL, intervalLegacy + "ms"); } + // scraper config and connector builder must be initialized with the effective SDK configuration + // thus we need to initialize them here and then rely on getter being called after this method. 
scraperConfig = JmxScraperConfig.fromConfig(config); + connectorBuilder = createConnectorBuilder(scraperConfig); + + long exportSeconds = scraperConfig.getSamplingInterval().toMillis() / 1000; + logger.log(Level.INFO, "metrics export interval (seconds) = " + exportSeconds); + return result; } + private static JmxConnectorBuilder createConnectorBuilder(JmxScraperConfig scraperConfig) { + JmxConnectorBuilder connectorBuilder = + JmxConnectorBuilder.createNew(scraperConfig.getServiceUrl()); + Optional.ofNullable(scraperConfig.getUsername()).ifPresent(connectorBuilder::withUser); + Optional.ofNullable(scraperConfig.getPassword()).ifPresent(connectorBuilder::withPassword); + if (scraperConfig.isRegistrySsl()) { + connectorBuilder.withSslRegistry(); + } + return connectorBuilder; + } + /** * Get scraper configuration from the previous call to {@link #apply(ConfigProperties)} * @@ -60,4 +83,11 @@ public JmxScraperConfig getScraperConfig() { } return scraperConfig; } + + public JmxConnectorBuilder getConnectorBuilder() { + if (connectorBuilder == null) { + throw new IllegalStateException("apply() must be called before getConnectorBuilder()"); + } + return connectorBuilder; + } } diff --git a/jmx-scraper/src/main/java/io/opentelemetry/contrib/jmxscraper/config/PropertiesSupplier.java b/jmx-scraper/src/main/java/io/opentelemetry/contrib/jmxscraper/config/PropertiesSupplier.java index 071e2b8fa..f4ad68faa 100644 --- a/jmx-scraper/src/main/java/io/opentelemetry/contrib/jmxscraper/config/PropertiesSupplier.java +++ b/jmx-scraper/src/main/java/io/opentelemetry/contrib/jmxscraper/config/PropertiesSupplier.java @@ -11,7 +11,7 @@ import java.util.function.Supplier; /** Configuration supplier for java properties */ -public class PropertiesSupplier implements Supplier> { +public final class PropertiesSupplier implements Supplier> { private final Properties properties; diff --git a/jmx-scraper/src/test/java/io/opentelemetry/contrib/jmxscraper/JmxScraperTest.java 
b/jmx-scraper/src/test/java/io/opentelemetry/contrib/jmxscraper/JmxScraperTest.java index 2d3178072..430476d51 100644 --- a/jmx-scraper/src/test/java/io/opentelemetry/contrib/jmxscraper/JmxScraperTest.java +++ b/jmx-scraper/src/test/java/io/opentelemetry/contrib/jmxscraper/JmxScraperTest.java @@ -5,6 +5,8 @@ package io.opentelemetry.contrib.jmxscraper; +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; @@ -12,11 +14,11 @@ import io.opentelemetry.contrib.jmxscraper.config.TestUtil; import java.io.IOException; import java.io.InputStream; -import java.util.Arrays; -import java.util.Collections; import java.util.List; import java.util.Properties; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.condition.DisabledOnOs; +import org.junit.jupiter.api.condition.OS; import org.junitpioneer.jupiter.ClearSystemProperty; class JmxScraperTest { @@ -30,7 +32,7 @@ void shouldThrowExceptionWhenInvalidCommandLineArgsProvided() { @Test void emptyArgumentsAllowed() throws InvalidArgumentException { - assertThat(JmxScraper.argsToConfig(Collections.emptyList())) + assertThat(JmxScraper.argsToConfig(emptyList())) .describedAs("empty config allowed to use JVM properties") .isEmpty(); } @@ -41,16 +43,18 @@ void shouldThrowExceptionWhenMissingProperties() { } private static void testInvalidArguments(String... args) { - assertThatThrownBy(() -> JmxScraper.argsToConfig(Arrays.asList(args))) + assertThatThrownBy(() -> JmxScraper.argsToConfig(asList(args))) .isInstanceOf(InvalidArgumentException.class); } @Test + @DisabledOnOs(OS.WINDOWS) void shouldCreateConfig_propertiesLoadedFromFile() throws InvalidArgumentException { // Given + // Windows returns /C:/path/to/file, which is not a valid path for Path.get() in Java. 
String filePath = ClassLoader.getSystemClassLoader().getResource("validConfig.properties").getPath(); - List args = Arrays.asList("-config", filePath); + List args = asList("-config", filePath); // When Properties parsedConfig = JmxScraper.argsToConfig(args); @@ -69,7 +73,7 @@ void shouldCreateConfig_propertiesLoadedFromStdIn() throws InvalidArgumentExcept ClassLoader.getSystemClassLoader().getResourceAsStream("validConfig.properties")) { // Given System.setIn(stream); - List args = Arrays.asList("-config", "-"); + List args = asList("-config", "-"); // When Properties parsedConfig = JmxScraper.argsToConfig(args); diff --git a/jmx-scraper/src/test/java/io/opentelemetry/contrib/jmxscraper/config/PropertiesCustomizerTest.java b/jmx-scraper/src/test/java/io/opentelemetry/contrib/jmxscraper/config/PropertiesCustomizerTest.java index 6ee1e7ba1..77ce948cc 100644 --- a/jmx-scraper/src/test/java/io/opentelemetry/contrib/jmxscraper/config/PropertiesCustomizerTest.java +++ b/jmx-scraper/src/test/java/io/opentelemetry/contrib/jmxscraper/config/PropertiesCustomizerTest.java @@ -17,16 +17,20 @@ class PropertiesCustomizerTest { + private static final String DUMMY_URL = "service:jmx:rmi:///jndi/rmi://host:999/jmxrmi"; + @Test - void tryGetConfigBeforeApply() { + void tryGetBeforeApply() { assertThatThrownBy(() -> new PropertiesCustomizer().getScraperConfig()) .isInstanceOf(IllegalStateException.class); + assertThatThrownBy(() -> new PropertiesCustomizer().getConnectorBuilder()) + .isInstanceOf(IllegalStateException.class); } @Test void defaultOtlpExporter() { Map map = new HashMap<>(); - map.put("otel.jmx.service.url", "dummy-url"); + map.put("otel.jmx.service.url", DUMMY_URL); map.put("otel.jmx.target.system", "jvm"); ConfigProperties config = DefaultConfigProperties.createFromMap(map); @@ -37,7 +41,7 @@ void defaultOtlpExporter() { @Test void explicitExporterSet() { Map map = new HashMap<>(); - map.put("otel.jmx.service.url", "dummy-url"); + map.put("otel.jmx.service.url", 
DUMMY_URL); map.put("otel.jmx.target.system", "jvm"); map.put("otel.metrics.exporter", "otlp,logging"); ConfigProperties config = DefaultConfigProperties.createFromMap(map); @@ -49,7 +53,7 @@ void explicitExporterSet() { @Test void getSomeConfiguration() { Map map = new HashMap<>(); - map.put("otel.jmx.service.url", "dummy-url"); + map.put("otel.jmx.service.url", DUMMY_URL); map.put("otel.jmx.target.system", "jvm"); map.put("otel.metrics.exporter", "otlp"); ConfigProperties config = DefaultConfigProperties.createFromMap(map); @@ -67,7 +71,7 @@ void getSomeConfiguration() { @Test void setSdkMetricExportFromJmxInterval() { Map map = new HashMap<>(); - map.put("otel.jmx.service.url", "dummy-url"); + map.put("otel.jmx.service.url", DUMMY_URL); map.put("otel.jmx.target.system", "jvm"); map.put("otel.metrics.exporter", "otlp"); map.put("otel.jmx.interval.milliseconds", "10000"); @@ -83,7 +87,7 @@ void setSdkMetricExportFromJmxInterval() { @Test void sdkMetricExportIntervalPriority() { Map map = new HashMap<>(); - map.put("otel.jmx.service.url", "dummy-url"); + map.put("otel.jmx.service.url", DUMMY_URL); map.put("otel.jmx.target.system", "jvm"); map.put("otel.metrics.exporter", "otlp"); map.put("otel.jmx.interval.milliseconds", "10000"); diff --git a/kafka-exporter/build.gradle.kts b/kafka-exporter/build.gradle.kts index ad18818fe..5f189f97a 100644 --- a/kafka-exporter/build.gradle.kts +++ b/kafka-exporter/build.gradle.kts @@ -4,12 +4,17 @@ plugins { } description = "SpanExporter based on Kafka" -otelJava.moduleName.set("io.opentelemetry.contrib.kafka") + +otelJava { + moduleName.set("io.opentelemetry.contrib.kafka") + // kafka 4 requires java 11 + minJavaVersionSupported.set(JavaVersion.VERSION_11) +} dependencies { api("io.opentelemetry:opentelemetry-sdk-trace") api("io.opentelemetry:opentelemetry-sdk-common") - api("io.opentelemetry.proto:opentelemetry-proto:1.5.0-alpha") + api("io.opentelemetry.proto:opentelemetry-proto:1.8.0-alpha") 
api("org.apache.kafka:kafka-clients") compileOnly("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure") diff --git a/kafka-exporter/src/main/java/io/opentelemetry/contrib/kafka/KafkaSpanExporter.java b/kafka-exporter/src/main/java/io/opentelemetry/contrib/kafka/KafkaSpanExporter.java index b5559832e..28e123fe8 100644 --- a/kafka-exporter/src/main/java/io/opentelemetry/contrib/kafka/KafkaSpanExporter.java +++ b/kafka-exporter/src/main/java/io/opentelemetry/contrib/kafka/KafkaSpanExporter.java @@ -26,7 +26,7 @@ @ThreadSafe @SuppressWarnings("FutureReturnValueIgnored") -public class KafkaSpanExporter implements SpanExporter { +public final class KafkaSpanExporter implements SpanExporter { private static final Logger logger = LoggerFactory.getLogger(KafkaSpanExporter.class); private final String topicName; private final Producer> producer; @@ -59,20 +59,28 @@ public CompletableResultCode export(@Nonnull Collection spans) { CompletableResultCode result = new CompletableResultCode(); CompletableFuture.runAsync( - () -> - producer.send( - producerRecord, - (metadata, exception) -> { - if (exception == null) { - result.succeed(); - } else { - logger.error( - String.format("Error while sending spans to Kafka topic %s", topicName), - exception); - result.fail(); - } - }), - executorService); + () -> + producer.send( + producerRecord, + (metadata, exception) -> { + if (exception == null) { + result.succeed(); + } else { + logger.error( + String.format("Error while sending spans to Kafka topic %s", topicName), + exception); + result.fail(); + } + }), + executorService) + .whenComplete( + (ignore, exception) -> { + if (exception != null) { + logger.error( + "Executor task failed while sending to Kafka topic {}", topicName, exception); + result.fail(); + } + }); return result; } diff --git a/kafka-exporter/src/main/java/io/opentelemetry/contrib/kafka/KafkaSpanExporterBuilder.java b/kafka-exporter/src/main/java/io/opentelemetry/contrib/kafka/KafkaSpanExporterBuilder.java 
index e6b49d6dc..d7f1d3c17 100644 --- a/kafka-exporter/src/main/java/io/opentelemetry/contrib/kafka/KafkaSpanExporterBuilder.java +++ b/kafka-exporter/src/main/java/io/opentelemetry/contrib/kafka/KafkaSpanExporterBuilder.java @@ -20,7 +20,7 @@ import org.apache.kafka.clients.producer.Producer; import org.apache.kafka.common.serialization.Serializer; -public class KafkaSpanExporterBuilder { +public final class KafkaSpanExporterBuilder { private static final long DEFAULT_TIMEOUT_IN_SECONDS = 5L; private String topicName; private Producer> producer; diff --git a/kafka-exporter/src/main/java/io/opentelemetry/contrib/kafka/SpanDataDeserializer.java b/kafka-exporter/src/main/java/io/opentelemetry/contrib/kafka/SpanDataDeserializer.java index 4c5ff7112..9da3f266f 100644 --- a/kafka-exporter/src/main/java/io/opentelemetry/contrib/kafka/SpanDataDeserializer.java +++ b/kafka-exporter/src/main/java/io/opentelemetry/contrib/kafka/SpanDataDeserializer.java @@ -11,7 +11,7 @@ import org.apache.kafka.common.errors.SerializationException; import org.apache.kafka.common.serialization.Deserializer; -public class SpanDataDeserializer implements Deserializer { +public final class SpanDataDeserializer implements Deserializer { @SuppressWarnings("NullAway") @Override public ExportTraceServiceRequest deserialize(String topic, byte[] data) { diff --git a/kafka-exporter/src/main/java/io/opentelemetry/contrib/kafka/SpanDataSerializer.java b/kafka-exporter/src/main/java/io/opentelemetry/contrib/kafka/SpanDataSerializer.java index 4c689f16a..8f31eb412 100644 --- a/kafka-exporter/src/main/java/io/opentelemetry/contrib/kafka/SpanDataSerializer.java +++ b/kafka-exporter/src/main/java/io/opentelemetry/contrib/kafka/SpanDataSerializer.java @@ -20,7 +20,7 @@ import org.apache.kafka.common.errors.SerializationException; import org.apache.kafka.common.serialization.Serializer; -public class SpanDataSerializer implements Serializer> { +public final class SpanDataSerializer implements Serializer> { 
@Override public byte[] serialize(String topic, Collection data) { if (Objects.isNull(data)) { diff --git a/kafka-exporter/src/test/java/io/opentelemetry/contrib/kafka/KafkaSpanExporterBuilderTest.java b/kafka-exporter/src/test/java/io/opentelemetry/contrib/kafka/KafkaSpanExporterBuilderTest.java index a7a5239ee..8c520514e 100644 --- a/kafka-exporter/src/test/java/io/opentelemetry/contrib/kafka/KafkaSpanExporterBuilderTest.java +++ b/kafka-exporter/src/test/java/io/opentelemetry/contrib/kafka/KafkaSpanExporterBuilderTest.java @@ -9,8 +9,8 @@ import static org.apache.kafka.clients.CommonClientConfigs.CLIENT_ID_CONFIG; import static org.apache.kafka.clients.producer.ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG; import static org.apache.kafka.clients.producer.ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import com.google.common.collect.ImmutableMap; import io.opentelemetry.sdk.trace.data.SpanData; @@ -49,7 +49,7 @@ void buildWithSerializersInSetters() { .build()) .build(); - assertNotNull(actual); + assertThat(actual).isNotNull(); actual.close(); } @@ -74,7 +74,7 @@ void buildWithSerializersInConfig() { .build()) .build(); - assertNotNull(actual); + assertThat(actual).isNotNull(); actual.close(); } @@ -91,33 +91,32 @@ void buildWithMissingTopic() { VALUE_SERIALIZER_CLASS_CONFIG, valueSerializerMock.getClass().getName()); - assertThrows( - IllegalArgumentException.class, - () -> - new KafkaSpanExporterBuilder() - .setProducer( - KafkaSpanExporterBuilder.ProducerBuilder.newInstance() - .setConfig(producerConfig) - .build()) - .build()); + assertThatThrownBy( + () -> + new KafkaSpanExporterBuilder() + .setProducer( + KafkaSpanExporterBuilder.ProducerBuilder.newInstance() + .setConfig(producerConfig) + .build()) + .build()) + 
.isInstanceOf(IllegalArgumentException.class); } @Test void buildWithMissingProducer() { - assertThrows( - IllegalArgumentException.class, - () -> new KafkaSpanExporterBuilder().setTopicName("a-topic").build()); + assertThatThrownBy(() -> new KafkaSpanExporterBuilder().setTopicName("a-topic").build()) + .isInstanceOf(IllegalArgumentException.class); } @Test void buildWithMissingProducerConfig() { - assertThrows( - IllegalArgumentException.class, - () -> - new KafkaSpanExporterBuilder() - .setTopicName("a-topic") - .setProducer(KafkaSpanExporterBuilder.ProducerBuilder.newInstance().build()) - .build()); + assertThatThrownBy( + () -> + new KafkaSpanExporterBuilder() + .setTopicName("a-topic") + .setProducer(KafkaSpanExporterBuilder.ProducerBuilder.newInstance().build()) + .build()) + .isInstanceOf(IllegalArgumentException.class); } @Test @@ -129,16 +128,16 @@ void buildWithMissingSerializers() { ProducerConfig.CLIENT_ID_CONFIG, "some clientId"); - assertThrows( - IllegalArgumentException.class, - () -> - new KafkaSpanExporterBuilder() - .setTopicName("a-topic") - .setProducer( - KafkaSpanExporterBuilder.ProducerBuilder.newInstance() - .setConfig(producerConfig) - .build()) - .build()); + assertThatThrownBy( + () -> + new KafkaSpanExporterBuilder() + .setTopicName("a-topic") + .setProducer( + KafkaSpanExporterBuilder.ProducerBuilder.newInstance() + .setConfig(producerConfig) + .build()) + .build()) + .isInstanceOf(IllegalArgumentException.class); } @Test @@ -152,17 +151,17 @@ void buildWithKeySerializerInConfigAndValueSerializerInSetter() { KEY_SERIALIZER_CLASS_CONFIG, keySerializerMock.getClass().getName()); - assertThrows( - IllegalArgumentException.class, - () -> - new KafkaSpanExporterBuilder() - .setTopicName("a-topic") - .setProducer( - KafkaSpanExporterBuilder.ProducerBuilder.newInstance() - .setConfig(producerConfig) - .setValueSerializer(valueSerializerMock) - .build()) - .build()); + assertThatThrownBy( + () -> + new KafkaSpanExporterBuilder() + 
.setTopicName("a-topic") + .setProducer( + KafkaSpanExporterBuilder.ProducerBuilder.newInstance() + .setConfig(producerConfig) + .setValueSerializer(valueSerializerMock) + .build()) + .build()) + .isInstanceOf(IllegalArgumentException.class); } @Test @@ -176,17 +175,17 @@ void buildWithValueSerializerInConfigAndKeySerializerInSetter() { VALUE_SERIALIZER_CLASS_CONFIG, valueSerializerMock.getClass().getName()); - assertThrows( - IllegalArgumentException.class, - () -> - new KafkaSpanExporterBuilder() - .setTopicName("a-topic") - .setProducer( - KafkaSpanExporterBuilder.ProducerBuilder.newInstance() - .setConfig(producerConfig) - .setKeySerializer(keySerializerMock) - .build()) - .build()); + assertThatThrownBy( + () -> + new KafkaSpanExporterBuilder() + .setTopicName("a-topic") + .setProducer( + KafkaSpanExporterBuilder.ProducerBuilder.newInstance() + .setConfig(producerConfig) + .setKeySerializer(keySerializerMock) + .build()) + .build()) + .isInstanceOf(IllegalArgumentException.class); } @Test @@ -202,17 +201,17 @@ void buildWithSerializersInConfigAndSetters() { VALUE_SERIALIZER_CLASS_CONFIG, valueSerializerMock.getClass().getName()); - assertThrows( - IllegalArgumentException.class, - () -> - new KafkaSpanExporterBuilder() - .setTopicName("a-topic") - .setProducer( - KafkaSpanExporterBuilder.ProducerBuilder.newInstance() - .setConfig(producerConfig) - .setKeySerializer(keySerializerMock) - .setValueSerializer(valueSerializerMock) - .build()) - .build()); + assertThatThrownBy( + () -> + new KafkaSpanExporterBuilder() + .setTopicName("a-topic") + .setProducer( + KafkaSpanExporterBuilder.ProducerBuilder.newInstance() + .setConfig(producerConfig) + .setKeySerializer(keySerializerMock) + .setValueSerializer(valueSerializerMock) + .build()) + .build()) + .isInstanceOf(IllegalArgumentException.class); } } diff --git a/kafka-exporter/src/test/java/io/opentelemetry/contrib/kafka/KafkaSpanExporterIntegrationTest.java 
b/kafka-exporter/src/test/java/io/opentelemetry/contrib/kafka/KafkaSpanExporterIntegrationTest.java index 4514efdcf..f8c010f89 100644 --- a/kafka-exporter/src/test/java/io/opentelemetry/contrib/kafka/KafkaSpanExporterIntegrationTest.java +++ b/kafka-exporter/src/test/java/io/opentelemetry/contrib/kafka/KafkaSpanExporterIntegrationTest.java @@ -19,6 +19,7 @@ import io.opentelemetry.sdk.common.CompletableResultCode; import io.opentelemetry.sdk.trace.data.SpanData; import java.time.Duration; +import java.util.Collection; import java.util.List; import java.util.UUID; import java.util.concurrent.TimeUnit; @@ -29,7 +30,9 @@ import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.clients.producer.MockProducer; import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.errors.ApiException; import org.apache.kafka.common.serialization.StringDeserializer; import org.apache.kafka.common.serialization.StringSerializer; @@ -46,7 +49,7 @@ @TestInstance(TestInstance.Lifecycle.PER_CLASS) class KafkaSpanExporterIntegrationTest { private static final DockerImageName KAFKA_TEST_IMAGE = - DockerImageName.parse("apache/kafka:3.8.1"); + DockerImageName.parse("apache/kafka:3.9.1"); private static final String TOPIC = "span_topic"; private KafkaContainer kafka; private KafkaConsumer consumer; @@ -155,6 +158,28 @@ void exportWhenProducerInError() { testSubject.shutdown(); } + @Test + void exportWhenProducerFailsToSend() { + var mockProducer = new MockProducer>(); + mockProducer.sendException = new KafkaException("Simulated kafka exception"); + var testSubjectWithMockProducer = + KafkaSpanExporter.newBuilder().setTopicName(TOPIC).setProducer(mockProducer).build(); + + ImmutableList spans = + ImmutableList.of(makeBasicSpan("span-1"), makeBasicSpan("span-2")); + + 
CompletableResultCode actual = testSubjectWithMockProducer.export(spans); + + await() + .untilAsserted( + () -> { + assertThat(actual.isSuccess()).isFalse(); + assertThat(actual.isDone()).isTrue(); + }); + + testSubjectWithMockProducer.shutdown(); + } + @Test void flush() { CompletableResultCode actual = testSubject.flush(); diff --git a/kafka-exporter/src/test/java/io/opentelemetry/contrib/kafka/SpanDataDeserializerTest.java b/kafka-exporter/src/test/java/io/opentelemetry/contrib/kafka/SpanDataDeserializerTest.java index 395fd357d..46636a247 100644 --- a/kafka-exporter/src/test/java/io/opentelemetry/contrib/kafka/SpanDataDeserializerTest.java +++ b/kafka-exporter/src/test/java/io/opentelemetry/contrib/kafka/SpanDataDeserializerTest.java @@ -5,9 +5,7 @@ package io.opentelemetry.contrib.kafka; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertNull; +import static org.assertj.core.api.Assertions.assertThat; import io.opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest; import io.opentelemetry.proto.resource.v1.Resource; @@ -38,16 +36,16 @@ void deserialize() { ExportTraceServiceRequest actual = testSubject.deserialize("test-topic", data); - assertEquals(request, actual); + assertThat(actual).isEqualTo(request); } @Test void deserializeNullData() { - assertNull(testSubject.deserialize("test-topic", null)); + assertThat(testSubject.deserialize("test-topic", null)).isNull(); } @Test void deserializeEmptyData() { - assertNotNull(testSubject.deserialize("test-topic", new byte[0])); + assertThat(testSubject.deserialize("test-topic", new byte[0])).isNotNull(); } } diff --git a/kafka-exporter/src/test/java/io/opentelemetry/contrib/kafka/SpanDataSerializerTest.java b/kafka-exporter/src/test/java/io/opentelemetry/contrib/kafka/SpanDataSerializerTest.java index 517610724..06c3df63e 100644 --- 
a/kafka-exporter/src/test/java/io/opentelemetry/contrib/kafka/SpanDataSerializerTest.java +++ b/kafka-exporter/src/test/java/io/opentelemetry/contrib/kafka/SpanDataSerializerTest.java @@ -6,8 +6,7 @@ package io.opentelemetry.contrib.kafka; import static io.opentelemetry.contrib.kafka.TestUtil.makeBasicSpan; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.assertj.core.api.Assertions.assertThat; import com.google.common.collect.ImmutableList; import io.opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest; @@ -28,14 +27,14 @@ void serialize() { byte[] actual = testSubject.serialize("test-topic", spans); - assertNotNull(actual); + assertThat(actual).isNotNull(); } @Test void serializeEmptyData() { byte[] actual = testSubject.serialize("test-topic", Collections.emptySet()); - assertEquals(0, actual.length); + assertThat(actual).isEmpty(); } @Test @@ -46,16 +45,18 @@ void convertSpansToRequest() { ExportTraceServiceRequest actual = testSubject.convertSpansToRequest(spans); - assertNotNull(actual); - assertEquals("span-1", actual.getResourceSpans(0).getScopeSpans(0).getSpans(0).getName()); - assertEquals("span-2", actual.getResourceSpans(0).getScopeSpans(0).getSpans(1).getName()); + assertThat(actual).isNotNull(); + assertThat(actual.getResourceSpans(0).getScopeSpans(0).getSpans(0).getName()) + .isEqualTo("span-1"); + assertThat(actual.getResourceSpans(0).getScopeSpans(0).getSpans(1).getName()) + .isEqualTo("span-2"); } @Test void convertSpansToRequestForEmptySpans() { ExportTraceServiceRequest actual = testSubject.convertSpansToRequest(Collections.emptySet()); - assertNotNull(actual); - assertEquals(ExportTraceServiceRequest.getDefaultInstance(), actual); + assertThat(actual).isNotNull(); + assertThat(actual).isEqualTo(ExportTraceServiceRequest.getDefaultInstance()); } } diff --git a/maven-extension/README.md b/maven-extension/README.md index 3ae23c7f9..fd0150355 
100644 --- a/maven-extension/README.md +++ b/maven-extension/README.md @@ -12,7 +12,7 @@ The Maven OpenTelemetry Extension is configured using environment variables or J * (since Maven 3.3.1) configuring the extension in `.mvn/extensions.xml`. In the code snippets below, replace `OPENTELEMETRY_MAVEN_VERSION` with the [latest -release](https://search.maven.org/search?q=g:io.opentelemetry.contrib%20AND%20a:opentelemetry-maven-extension). +release](https://central.sonatype.com/artifact/io.opentelemetry.contrib/opentelemetry-maven-extension). ### Adding the extension to the classpath @@ -61,14 +61,15 @@ Without this setting, the traces won't be exported and the OpenTelemetry Maven E The Maven OpenTelemetry Extension supports a subset of the [OpenTelemetry autoconfiguration environment variables and JVM system properties](https://github.com/open-telemetry/opentelemetry-java/tree/main/sdk-extensions/autoconfigure). -| System property
Environment variable | Default value | Description | -|--------------------------------------------------------------------------------------------|-------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------| -| `otel.traces.exporter`
`OTEL_TRACES_EXPORTER` | `none` | Select the OpenTelemetry exporter for tracing, the currently only supported values are `none` and `otlp`. `none` makes the instrumentation NoOp | -| `otel.exporter.otlp.endpoint`
`OTEL_EXPORTER_OTLP_ENDPOINT` | `http://localhost:4317` | The OTLP traces and metrics endpoint to connect to. Must be a URL with a scheme of either `http` or `https` based on the use of TLS. | -| `otel.exporter.otlp.headers`
`OTEL_EXPORTER_OTLP_HEADERS` | | Key-value pairs separated by commas to pass as request headers on OTLP trace and metrics requests. | -| `otel.exporter.otlp.timeout`
`OTEL_EXPORTER_OTLP_TIMEOUT` | `10000` | The maximum waiting time, in milliseconds, allowed to send each OTLP trace and metric batch. | -| `otel.resource.attributes`
`OTEL_RESOURCE_ATTRIBUTES` | | Specify resource attributes in the following format: key1=val1,key2=val2,key3=val3 | -| `otel.instrumentation.maven.mojo.enabled`
`OTEL_INSTRUMENTATION_MAVEN_MOJO_ENABLED` | `true` | Whether to create spans for mojo goal executions, `true` or `false`. Can be configured to reduce the number of spans created for large builds. | +| System property
Environment variable | Default value | Description | +|----------------------------------------------------------------------------------------------------|-------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------| +| `otel.traces.exporter`
`OTEL_TRACES_EXPORTER` | `none` | Select the OpenTelemetry exporter for tracing, the currently only supported values are `none` and `otlp`. `none` makes the instrumentation NoOp | +| `otel.exporter.otlp.endpoint`
`OTEL_EXPORTER_OTLP_ENDPOINT` | `http://localhost:4317` | The OTLP traces and metrics endpoint to connect to. Must be a URL with a scheme of either `http` or `https` based on the use of TLS. | +| `otel.exporter.otlp.headers`
`OTEL_EXPORTER_OTLP_HEADERS` | | Key-value pairs separated by commas to pass as request headers on OTLP trace and metrics requests. | +| `otel.exporter.otlp.timeout`
`OTEL_EXPORTER_OTLP_TIMEOUT` | `10000` | The maximum waiting time, in milliseconds, allowed to send each OTLP trace and metric batch. | +| `otel.resource.attributes`
`OTEL_RESOURCE_ATTRIBUTES` | | Specify resource attributes in the following format: key1=val1,key2=val2,key3=val3 | +| `otel.instrumentation.maven.mojo.enabled`
`OTEL_INSTRUMENTATION_MAVEN_MOJO_ENABLED` | `true` | Whether to create spans for mojo goal executions, `true` or `false`. Can be configured to reduce the number of spans created for large builds. | +| `otel.instrumentation.maven.transfer.enabled`
`OTEL_INSTRUMENTATION_MAVEN_TRANSFER_ENABLED` | `false` | Whether to create spans for artifact transfers, `true` or `false`. Can be activated to understand impact of artifact transfers on performances. | ℹ️ The `service.name` is set to `maven` and the `service.version` to the version of the Maven runtime in use. @@ -108,8 +109,8 @@ In addition to the span attributes captured on every Maven plugin goal executio |----------------------------------------|--------|-----------------------------------------------------------------------------------------------------------------------------------------------------| | `http.method` | string | `POST` | | `http.url` | string | Base URL of the uploaded artifact `${maven.build.repository.url}/${groupId}/${artifactId}/${version}` where the `.` of `${groupId}` are replaced by `/` | -| `maven.build.repository.id` | string | ID of the Maven repository to which the artifact is deployed. See [Maven POM reference / Repository](https://maven.apache.org/pom.html#repository) | -| `maven.build.repository.url` | string | URL of the Maven repository to which the artifact is deployed. See [Maven POM reference / Repository](https://maven.apache.org/pom.html#repository) | +| `maven.build.repository.id` | string | ID of the Maven repository to which the artifact is deployed. See [Maven POM reference / Repository](https://maven.apache.org/pom.html#Repository) | +| `maven.build.repository.url` | string | URL of the Maven repository to which the artifact is deployed. See [Maven POM reference / Repository](https://maven.apache.org/pom.html#Repository) | | `peer.service` | string | Maven repository hostname deduced from the Repository URL | The `span.kind` is set to `client` @@ -199,7 +200,7 @@ Steps to instrument a Maven Mojo: * Add the OpenTelemetry API dependency in the `pom.xml` of the Maven plugin. Replace `OPENTELEMETRY_VERSION` with the [latest - release](https://search.maven.org/search?q=g:io.opentelemetry%20AND%20a:opentelemetry-api). 
+ release](https://central.sonatype.com/artifact/io.opentelemetry/opentelemetry-api). ```xml diff --git a/maven-extension/build.gradle.kts b/maven-extension/build.gradle.kts index 4304534f8..a100335ee 100644 --- a/maven-extension/build.gradle.kts +++ b/maven-extension/build.gradle.kts @@ -1,12 +1,12 @@ plugins { id("java") - id("com.github.johnrengelman.shadow") + id("com.gradleup.shadow") id("otel.java-conventions") id("otel.publish-conventions") } // NOTE -// `META-INF/sis/javax.inject.Named` is manually handled under src/main/resources because there is +// `META-INF/sisu/javax.inject.Named` is manually handled under src/main/resources because there is // no Gradle equivalent to the Maven plugin `org.eclipse.sisu:sisu-maven-plugin` description = "Maven extension to observe Maven builds with distributed traces using OpenTelemetry SDK" @@ -16,6 +16,7 @@ dependencies { compileOnly("javax.inject:javax.inject:1") implementation("io.opentelemetry:opentelemetry-api") + compileOnly("io.opentelemetry:opentelemetry-api-incubator") implementation("io.opentelemetry:opentelemetry-sdk") implementation("io.opentelemetry:opentelemetry-sdk-trace") implementation("io.opentelemetry:opentelemetry-sdk-metrics") @@ -24,7 +25,7 @@ dependencies { implementation("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure") implementation("io.opentelemetry:opentelemetry-exporter-otlp") implementation("io.opentelemetry.semconv:opentelemetry-semconv") - testImplementation("io.opentelemetry.semconv:opentelemetry-semconv-incubating") + implementation("io.opentelemetry.semconv:opentelemetry-semconv-incubating") annotationProcessor("com.google.auto.value:auto-value") compileOnly("com.google.auto.value:auto-value-annotations") @@ -32,6 +33,10 @@ dependencies { compileOnly("org.apache.maven:maven-core:3.5.0") // do not auto-update, support older mvn versions compileOnly("org.slf4j:slf4j-api") + testImplementation("io.opentelemetry:opentelemetry-sdk-testing") + 
testImplementation("io.opentelemetry:opentelemetry-api-incubator") + testImplementation("io.opentelemetry:opentelemetry-exporter-logging") + testImplementation("io.opentelemetry:opentelemetry-sdk-extension-incubator") testImplementation("org.apache.maven:maven-core:3.5.0") testImplementation("org.slf4j:slf4j-simple") } diff --git a/maven-extension/src/main/java/io/opentelemetry/maven/ChainedTransferListener.java b/maven-extension/src/main/java/io/opentelemetry/maven/ChainedTransferListener.java new file mode 100644 index 000000000..692d1aff3 --- /dev/null +++ b/maven-extension/src/main/java/io/opentelemetry/maven/ChainedTransferListener.java @@ -0,0 +1,70 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.maven; + +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; +import org.eclipse.aether.transfer.TransferCancelledException; +import org.eclipse.aether.transfer.TransferEvent; +import org.eclipse.aether.transfer.TransferListener; + +/** + * Util class to chain multiple {@link TransferListener} as Maven APIs don't offer this capability. + */ +final class ChainedTransferListener implements TransferListener { + + private final List listeners; + + /** + * @param listeners {@code null} values are filtered + */ + ChainedTransferListener(TransferListener... 
listeners) { + this.listeners = Arrays.stream(listeners).filter(e -> e != null).collect(Collectors.toList()); + } + + @Override + public void transferInitiated(TransferEvent event) throws TransferCancelledException { + for (TransferListener listener : this.listeners) { + listener.transferInitiated(event); + } + } + + @Override + public void transferStarted(TransferEvent event) throws TransferCancelledException { + for (TransferListener listener : this.listeners) { + listener.transferStarted(event); + } + } + + @Override + public void transferProgressed(TransferEvent event) throws TransferCancelledException { + for (TransferListener listener : this.listeners) { + listener.transferProgressed(event); + } + } + + @Override + public void transferCorrupted(TransferEvent event) throws TransferCancelledException { + for (TransferListener listener : this.listeners) { + listener.transferCorrupted(event); + } + } + + @Override + public void transferSucceeded(TransferEvent event) { + for (TransferListener listener : this.listeners) { + listener.transferSucceeded(event); + } + } + + @Override + public void transferFailed(TransferEvent event) { + for (TransferListener listener : this.listeners) { + listener.transferFailed(event); + } + } +} diff --git a/maven-extension/src/main/java/io/opentelemetry/maven/OpenTelemetrySdkService.java b/maven-extension/src/main/java/io/opentelemetry/maven/OpenTelemetrySdkService.java index a357b3177..0d2e5150e 100644 --- a/maven-extension/src/main/java/io/opentelemetry/maven/OpenTelemetrySdkService.java +++ b/maven-extension/src/main/java/io/opentelemetry/maven/OpenTelemetrySdkService.java @@ -49,10 +49,12 @@ public final class OpenTelemetrySdkService implements Closeable { private final boolean mojosInstrumentationEnabled; + private final boolean transferInstrumentationEnabled; + private boolean disposed; public OpenTelemetrySdkService() { - logger.debug( + logger.info( "OpenTelemetry: Initialize OpenTelemetrySdkService v{}...", 
MavenOtelSemanticAttributes.TELEMETRY_DISTRO_VERSION_VALUE); @@ -76,6 +78,8 @@ public OpenTelemetrySdkService() { this.mojosInstrumentationEnabled = configProperties.getBoolean("otel.instrumentation.maven.mojo.enabled", true); + this.transferInstrumentationEnabled = + configProperties.getBoolean("otel.instrumentation.maven.transfer.enabled", false); this.tracer = openTelemetrySdk.getTracer("io.opentelemetry.contrib.maven", VERSION); } @@ -154,4 +158,8 @@ public ContextPropagators getPropagators() { public boolean isMojosInstrumentationEnabled() { return mojosInstrumentationEnabled; } + + public boolean isTransferInstrumentationEnabled() { + return transferInstrumentationEnabled; + } } diff --git a/maven-extension/src/main/java/io/opentelemetry/maven/OtelExecutionListener.java b/maven-extension/src/main/java/io/opentelemetry/maven/OtelExecutionListener.java index 8da5c3acd..830412f7f 100644 --- a/maven-extension/src/main/java/io/opentelemetry/maven/OtelExecutionListener.java +++ b/maven-extension/src/main/java/io/opentelemetry/maven/OtelExecutionListener.java @@ -17,12 +17,9 @@ import io.opentelemetry.maven.handler.MojoGoalExecutionHandler; import io.opentelemetry.maven.handler.MojoGoalExecutionHandlerConfiguration; import io.opentelemetry.maven.semconv.MavenOtelSemanticAttributes; -import java.util.Locale; import java.util.Map; import java.util.Optional; import java.util.stream.Collectors; -import javax.annotation.Nonnull; -import javax.annotation.Nullable; import org.apache.maven.execution.AbstractExecutionListener; import org.apache.maven.execution.ExecutionEvent; import org.apache.maven.execution.ExecutionListener; @@ -338,19 +335,4 @@ public void sessionEnded(ExecutionEvent event) { logger.debug("OpenTelemetry: Maven session ended, end root span"); spanRegistry.removeRootSpan().end(); } - - private static class ToUpperCaseTextMapGetter implements TextMapGetter> { - @Override - public Iterable keys(Map environmentVariables) { - return 
environmentVariables.keySet(); - } - - @Override - @Nullable - public String get(@Nullable Map environmentVariables, @Nonnull String key) { - return environmentVariables == null - ? null - : environmentVariables.get(key.toUpperCase(Locale.ROOT)); - } - } } diff --git a/maven-extension/src/main/java/io/opentelemetry/maven/OtelLifecycleParticipant.java b/maven-extension/src/main/java/io/opentelemetry/maven/OtelLifecycleParticipant.java index d613f9607..692a70650 100644 --- a/maven-extension/src/main/java/io/opentelemetry/maven/OtelLifecycleParticipant.java +++ b/maven-extension/src/main/java/io/opentelemetry/maven/OtelLifecycleParticipant.java @@ -11,6 +11,9 @@ import org.apache.maven.AbstractMavenLifecycleParticipant; import org.apache.maven.execution.ExecutionListener; import org.apache.maven.execution.MavenSession; +import org.eclipse.aether.DefaultRepositorySystemSession; +import org.eclipse.aether.RepositorySystemSession; +import org.eclipse.aether.transfer.TransferListener; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -25,6 +28,8 @@ public final class OtelLifecycleParticipant extends AbstractMavenLifecyclePartic private final OtelExecutionListener otelExecutionListener; + private final OtelTransferListener otelTransferListener; + /** * Manually instantiate {@link OtelExecutionListener} and hook it in the Maven build lifecycle * because Maven Sisu doesn't load it when Maven Plexus did. 
@@ -34,6 +39,14 @@ public final class OtelLifecycleParticipant extends AbstractMavenLifecyclePartic OpenTelemetrySdkService openTelemetrySdkService, SpanRegistry spanRegistry) { this.openTelemetrySdkService = openTelemetrySdkService; this.otelExecutionListener = new OtelExecutionListener(spanRegistry, openTelemetrySdkService); + this.otelTransferListener = new OtelTransferListener(spanRegistry, openTelemetrySdkService); + } + + @Override + public void afterSessionStart(MavenSession session) { + if (openTelemetrySdkService.isTransferInstrumentationEnabled()) { + registerTransferListener(session); + } } /** @@ -43,6 +56,10 @@ public final class OtelLifecycleParticipant extends AbstractMavenLifecyclePartic */ @Override public void afterProjectsRead(MavenSession session) { + registerExecutionListener(session); + } + + void registerExecutionListener(MavenSession session) { ExecutionListener initialExecutionListener = session.getRequest().getExecutionListener(); if (initialExecutionListener instanceof ChainedExecutionListener || initialExecutionListener instanceof OtelExecutionListener) { @@ -64,6 +81,40 @@ public void afterProjectsRead(MavenSession session) { } } + void registerTransferListener(MavenSession session) { + RepositorySystemSession repositorySession = session.getRepositorySession(); + TransferListener initialTransferListener = repositorySession.getTransferListener(); + if (initialTransferListener instanceof ChainedTransferListener + || initialTransferListener instanceof OtelTransferListener) { + // already initialized + logger.debug( + "OpenTelemetry: OpenTelemetry extension already registered as transfer listener, skip."); + } else if (initialTransferListener == null) { + setTransferListener(this.otelTransferListener, repositorySession, session); + logger.debug( + "OpenTelemetry: OpenTelemetry extension registered as transfer listener. 
No transfer listener initially defined"); + } else { + setTransferListener( + new ChainedTransferListener(this.otelTransferListener, initialTransferListener), + repositorySession, + session); + logger.debug( + "OpenTelemetry: OpenTelemetry extension registered as transfer listener. InitialTransferListener: {}", + initialTransferListener); + } + } + + void setTransferListener( + TransferListener transferListener, + RepositorySystemSession repositorySession, + MavenSession session) { + if (repositorySession instanceof DefaultRepositorySystemSession) { + ((DefaultRepositorySystemSession) repositorySession).setTransferListener(transferListener); + } else { + logger.warn("OpenTelemetry: Cannot set transfer listener"); + } + } + @Override public void afterSessionEnd(MavenSession session) { // Workaround https://issues.apache.org/jira/browse/MNG-8217 diff --git a/maven-extension/src/main/java/io/opentelemetry/maven/OtelTransferListener.java b/maven-extension/src/main/java/io/opentelemetry/maven/OtelTransferListener.java new file mode 100644 index 000000000..53c2aa729 --- /dev/null +++ b/maven-extension/src/main/java/io/opentelemetry/maven/OtelTransferListener.java @@ -0,0 +1,166 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.maven; + +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.SpanBuilder; +import io.opentelemetry.api.trace.SpanKind; +import io.opentelemetry.api.trace.StatusCode; +import io.opentelemetry.maven.semconv.MavenOtelSemanticAttributes; +import io.opentelemetry.semconv.HttpAttributes; +import io.opentelemetry.semconv.ServerAttributes; +import io.opentelemetry.semconv.UrlAttributes; +import io.opentelemetry.semconv.incubating.HttpIncubatingAttributes; +import io.opentelemetry.semconv.incubating.UrlIncubatingAttributes; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.Map; +import java.util.Optional; +import 
java.util.concurrent.ConcurrentHashMap; +import org.apache.maven.execution.ExecutionListener; +import org.apache.maven.execution.MavenSession; +import org.eclipse.aether.transfer.AbstractTransferListener; +import org.eclipse.aether.transfer.TransferEvent; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Don't mark this class as {@link javax.inject.Named} and {@link javax.inject.Singleton} because + * Maven Sisu doesn't automatically load instances of {@link org.eclipse.aether.transfer.TransferListener} as Maven Extension + * hooks the same way Maven Plexus did so we manually hook this instance of {@link + * org.eclipse.aether.transfer.TransferListener} through the {@link OtelLifecycleParticipant#afterSessionStart(MavenSession)}. + */ +public final class OtelTransferListener extends AbstractTransferListener { + + private static final Logger logger = LoggerFactory.getLogger(OtelTransferListener.class); + + private final SpanRegistry spanRegistry; + + private final OpenTelemetrySdkService openTelemetrySdkService; + + private final Map> repositoryUriMapping = new ConcurrentHashMap<>(); + + OtelTransferListener(SpanRegistry spanRegistry, OpenTelemetrySdkService openTelemetrySdkService) { + this.spanRegistry = spanRegistry; + this.openTelemetrySdkService = openTelemetrySdkService; + } + + @Override + public void transferInitiated(TransferEvent event) { + logger.debug("OpenTelemetry: OtelTransferListener#transferInitiated({})", event); + + String httpRequestMethod; + switch (event.getRequestType()) { + case PUT: + httpRequestMethod = "PUT"; + break; + case GET: + httpRequestMethod = "GET"; + break; + case GET_EXISTENCE: + httpRequestMethod = "HEAD"; + break; + default: + logger.warn( + "OpenTelemetry: Unknown request type {} for event {}", event.getRequestType(), event); + httpRequestMethod = event.getRequestType().name(); + } + + String urlTemplate = + event.getResource().getRepositoryUrl() + + "$groupId/$artifactId/$version/$artifactId-$version.$classifier"; + + String spanName = httpRequestMethod + " 
+ urlTemplate; + + // Build an HTTP client span as the http call itself is not instrumented. + SpanBuilder spanBuilder = + this.openTelemetrySdkService + .getTracer() + .spanBuilder(spanName) + .setSpanKind(SpanKind.CLIENT) + .setAttribute(HttpAttributes.HTTP_REQUEST_METHOD, httpRequestMethod) + .setAttribute( + UrlAttributes.URL_PATH, + event.getResource().getRepositoryUrl() + event.getResource().getResourceName()) + .setAttribute(UrlIncubatingAttributes.URL_TEMPLATE, urlTemplate) + .setAttribute( + MavenOtelSemanticAttributes.MAVEN_TRANSFER_TYPE, event.getRequestType().name()) + .setAttribute( + MavenOtelSemanticAttributes.MAVEN_RESOURCE_NAME, + event.getResource().getResourceName()); + + repositoryUriMapping + .computeIfAbsent( + event.getResource().getRepositoryUrl(), + str -> { + try { + return str.isEmpty() ? Optional.empty() : Optional.of(new URI(str)); + } catch (URISyntaxException e) { + return Optional.empty(); + } + }) + .ifPresent( + uri -> { + spanBuilder.setAttribute(ServerAttributes.SERVER_ADDRESS, uri.getHost()); + if (uri.getPort() != -1) { + spanBuilder.setAttribute(ServerAttributes.SERVER_PORT, uri.getPort()); + } + // prevent ever increasing size + if (repositoryUriMapping.size() > 128) { + repositoryUriMapping.clear(); + } + }); + spanRegistry.putSpan(spanBuilder.startSpan(), event); + } + + @Override + public void transferSucceeded(TransferEvent event) { + logger.debug("OpenTelemetry: OtelTransferListener#transferSucceeded({})", event); + + Optional.ofNullable(spanRegistry.removeSpan(event)) + .ifPresent( + span -> { + span.setStatus(StatusCode.OK); + finish(span, event); + }); + } + + @Override + public void transferFailed(TransferEvent event) { + logger.debug("OpenTelemetry: OtelTransferListener#transferFailed({})", event); + + Optional.ofNullable(spanRegistry.removeSpan(event)).ifPresent(span -> fail(span, event)); + } + + @Override + public void transferCorrupted(TransferEvent event) { + logger.debug("OpenTelemetry: 
OtelTransferListener#transferCorrupted({})", event); + + Optional.ofNullable(spanRegistry.removeSpan(event)).ifPresent(span -> fail(span, event)); + } + + void finish(Span span, TransferEvent event) { + switch (event.getRequestType()) { + case PUT: + span.setAttribute( + HttpIncubatingAttributes.HTTP_REQUEST_BODY_SIZE, event.getTransferredBytes()); + break; + case GET: + case GET_EXISTENCE: + span.setAttribute( + HttpIncubatingAttributes.HTTP_RESPONSE_BODY_SIZE, event.getTransferredBytes()); + break; + } + span.end(); + } + + void fail(Span span, TransferEvent event) { + span.setStatus( + StatusCode.ERROR, + Optional.ofNullable(event.getException()).map(Exception::getMessage).orElse("n/a")); + finish(span, event); + } +} diff --git a/maven-extension/src/main/java/io/opentelemetry/maven/SpanRegistry.java b/maven-extension/src/main/java/io/opentelemetry/maven/SpanRegistry.java index 276f1e116..85ef5c2e7 100644 --- a/maven-extension/src/main/java/io/opentelemetry/maven/SpanRegistry.java +++ b/maven-extension/src/main/java/io/opentelemetry/maven/SpanRegistry.java @@ -17,6 +17,9 @@ import org.apache.maven.model.Plugin; import org.apache.maven.plugin.MojoExecution; import org.apache.maven.project.MavenProject; +import org.eclipse.aether.RepositorySystemSession; +import org.eclipse.aether.transfer.TransferEvent; +import org.eclipse.aether.transfer.TransferResource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -35,6 +38,7 @@ public final class SpanRegistry { private final Map mojoExecutionKeySpanMap = new ConcurrentHashMap<>(); private final Map mavenProjectKeySpanMap = new ConcurrentHashMap<>(); + private final Map transferKeySpanMap = new ConcurrentHashMap<>(); @Nullable private Span rootSpan; /** @@ -113,6 +117,15 @@ public void putSpan(Span span, MojoExecution mojoExecution, MavenProject project } } + public void putSpan(Span span, TransferEvent event) { + TransferKey key = TransferKey.fromTransferEvent(event); + logger.debug("OpenTelemetry: 
putSpan({})", key); + Span previousSpanForKey = transferKeySpanMap.put(key, span); + if (previousSpanForKey != null) { + logger.warn("A span has already been started for " + key); + } + } + public Span removeSpan(MavenProject mavenProject) { logger.debug("OpenTelemetry: removeSpan({})", mavenProject); MavenProjectKey key = MavenProjectKey.fromMavenProject(mavenProject); @@ -136,6 +149,17 @@ public Span removeSpan(MojoExecution mojoExecution, MavenProject project) { return span; } + public Span removeSpan(TransferEvent event) { + TransferKey key = TransferKey.fromTransferEvent(event); + logger.debug("OpenTelemetry: removeSpan({})", key); + Span span = transferKeySpanMap.remove(key); + if (span == null) { + logger.warn("No span found for " + key); + return Span.getInvalid(); + } + return span; + } + @AutoValue abstract static class MavenProjectKey { abstract String groupId(); @@ -185,4 +209,18 @@ static MojoExecutionKey fromMojoExecution(MojoExecution mojoExecution, MavenProj MavenProjectKey.fromMavenProject(project)); } } + + @AutoValue + abstract static class TransferKey { + abstract String resourceName(); + + abstract String sessionId(); + + public static TransferKey fromTransferEvent(@Nonnull TransferEvent event) { + TransferResource resource = event.getResource(); + RepositorySystemSession session = event.getSession(); + return new AutoValue_SpanRegistry_TransferKey( + resource.getResourceName(), "session-" + System.identityHashCode(session)); + } + } } diff --git a/maven-extension/src/main/java/io/opentelemetry/maven/ToUpperCaseTextMapGetter.java b/maven-extension/src/main/java/io/opentelemetry/maven/ToUpperCaseTextMapGetter.java new file mode 100644 index 000000000..e02b7b3e0 --- /dev/null +++ b/maven-extension/src/main/java/io/opentelemetry/maven/ToUpperCaseTextMapGetter.java @@ -0,0 +1,28 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.maven; + +import 
io.opentelemetry.context.propagation.TextMapGetter; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; + +final class ToUpperCaseTextMapGetter implements TextMapGetter> { + @Override + public Set keys(Map environmentVariables) { + return environmentVariables.keySet(); + } + + @Override + @Nullable + public String get(@Nullable Map environmentVariables, @Nonnull String key) { + return environmentVariables == null + ? null + : environmentVariables.get(key.toUpperCase(Locale.ROOT)); + } +} diff --git a/maven-extension/src/main/java/io/opentelemetry/maven/resources/MavenResourceDetector.java b/maven-extension/src/main/java/io/opentelemetry/maven/resources/MavenResourceDetector.java new file mode 100644 index 000000000..47ff50759 --- /dev/null +++ b/maven-extension/src/main/java/io/opentelemetry/maven/resources/MavenResourceDetector.java @@ -0,0 +1,28 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.maven.resources; + +import io.opentelemetry.api.incubator.config.DeclarativeConfigProperties; +import io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider; +import io.opentelemetry.sdk.resources.Resource; + +public class MavenResourceDetector implements ComponentProvider { + + @Override + public Class getType() { + return Resource.class; + } + + @Override + public String getName() { + return "maven"; + } + + @Override + public Resource create(DeclarativeConfigProperties config) { + return MavenResourceProvider.create(); + } +} diff --git a/maven-extension/src/main/java/io/opentelemetry/maven/resources/MavenResourceProvider.java b/maven-extension/src/main/java/io/opentelemetry/maven/resources/MavenResourceProvider.java index 66e7b028b..9b75d11d0 100644 --- a/maven-extension/src/main/java/io/opentelemetry/maven/resources/MavenResourceProvider.java +++ 
b/maven-extension/src/main/java/io/opentelemetry/maven/resources/MavenResourceProvider.java @@ -23,6 +23,10 @@ public class MavenResourceProvider implements ResourceProvider { @Override public Resource createResource(ConfigProperties config) { + return create(); + } + + static Resource create() { return Resource.builder() .put(ServiceAttributes.SERVICE_NAME, MavenOtelSemanticAttributes.SERVICE_NAME_VALUE) .put(ServiceAttributes.SERVICE_VERSION, getMavenRuntimeVersion()) diff --git a/maven-extension/src/main/java/io/opentelemetry/maven/semconv/MavenOtelSemanticAttributes.java b/maven-extension/src/main/java/io/opentelemetry/maven/semconv/MavenOtelSemanticAttributes.java index c5d821e31..446e9b87d 100644 --- a/maven-extension/src/main/java/io/opentelemetry/maven/semconv/MavenOtelSemanticAttributes.java +++ b/maven-extension/src/main/java/io/opentelemetry/maven/semconv/MavenOtelSemanticAttributes.java @@ -11,6 +11,7 @@ import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.maven.OpenTelemetrySdkService; import java.util.List; +import org.eclipse.aether.transfer.TransferEvent; /** * Semantic attributes for Maven executions. @@ -36,6 +37,7 @@ public class MavenOtelSemanticAttributes { stringKey("maven.build.repository.id"); public static final AttributeKey MAVEN_BUILD_REPOSITORY_URL = stringKey("maven.build.repository.url"); + public static final AttributeKey MAVEN_EXECUTION_GOAL = stringKey("maven.execution.goal"); public static final AttributeKey MAVEN_EXECUTION_ID = stringKey("maven.execution.id"); @@ -53,6 +55,14 @@ public class MavenOtelSemanticAttributes { public static final AttributeKey MAVEN_PROJECT_VERSION = stringKey("maven.project.version"); + /** See {@link TransferEvent.RequestType}. */ + public static final AttributeKey MAVEN_TRANSFER_TYPE = + AttributeKey.stringKey("maven.transfer.type"); + + /** See {@link org.eclipse.aether.transfer.TransferResource}. 
*/ + public static final AttributeKey MAVEN_RESOURCE_NAME = + AttributeKey.stringKey("maven.resource.name"); + public static final String SERVICE_NAME_VALUE = "maven"; // inlined incubating attribute to prevent direct dependency on incubating semconv diff --git a/maven-extension/src/main/resources/META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider b/maven-extension/src/main/resources/META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider new file mode 100644 index 000000000..baac89a6a --- /dev/null +++ b/maven-extension/src/main/resources/META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider @@ -0,0 +1 @@ +io.opentelemetry.maven.resources.MavenResourceDetector diff --git a/maven-extension/src/test/java/io/opentelemetry/maven/OpenTelemetrySdkServiceTest.java b/maven-extension/src/test/java/io/opentelemetry/maven/OpenTelemetrySdkServiceTest.java index 0174a6283..0b94dce54 100644 --- a/maven-extension/src/test/java/io/opentelemetry/maven/OpenTelemetrySdkServiceTest.java +++ b/maven-extension/src/test/java/io/opentelemetry/maven/OpenTelemetrySdkServiceTest.java @@ -17,11 +17,11 @@ * Note: if otel-java-contrib bumps to Java 11+, we could use junit-pioneer's * {@code @SetSystemProperty} and {@code @ClearSystemProperty} but no bump is planned for now. 
*/ -public class OpenTelemetrySdkServiceTest { +class OpenTelemetrySdkServiceTest { /** Verify default config */ @Test - public void testDefaultConfiguration() { + void testDefaultConfiguration() { System.clearProperty("otel.exporter.otlp.endpoint"); System.clearProperty("otel.service.name"); System.clearProperty("otel.resource.attributes"); @@ -40,7 +40,7 @@ public void testDefaultConfiguration() { /** Verify overwritten `service.name`,`key1` and `key2` */ @Test - public void testOverwrittenResourceAttributes() { + void testOverwrittenResourceAttributes() { System.setProperty("otel.service.name", "my-maven"); System.setProperty("otel.resource.attributes", "key1=val1,key2=val2"); @@ -59,7 +59,7 @@ public void testOverwrittenResourceAttributes() { /** Verify defining `otel.exporter.otlp.endpoint` works */ @Test - public void testOverwrittenExporterConfiguration_1() { + void testOverwrittenExporterConfiguration_1() { System.setProperty("otel.exporter.otlp.endpoint", "https://example.com:4317"); try (OpenTelemetrySdkService openTelemetrySdkService = new OpenTelemetrySdkService()) { @@ -78,7 +78,7 @@ public void testOverwrittenExporterConfiguration_1() { /** Verify defining `otel.exporter.otlp.traces.endpoint` works */ @Test - public void testOverwrittenExporterConfiguration_2() { + void testOverwrittenExporterConfiguration_2() { System.clearProperty("otel.exporter.otlp.endpoint"); System.clearProperty("otel.traces.exporter"); System.setProperty("otel.exporter.otlp.traces.endpoint", "https://example.com:4317/"); @@ -102,7 +102,7 @@ public void testOverwrittenExporterConfiguration_2() { /** Verify defining `otel.exporter.otlp.traces.endpoint` and `otel.traces.exporter` works */ @Test - public void testOverwrittenExporterConfiguration_3() { + void testOverwrittenExporterConfiguration_3() { System.clearProperty("otel.exporter.otlp.endpoint"); System.setProperty("otel.traces.exporter", "otlp"); System.setProperty("otel.exporter.otlp.traces.endpoint", 
"https://example.com:4317/"); @@ -120,7 +120,7 @@ public void testOverwrittenExporterConfiguration_3() { } finally { System.clearProperty("otel.exporter.otlp.endpoint"); System.clearProperty("otel.exporter.otlp.traces.endpoint"); - System.clearProperty("otel.exporter.otlp.traces.protocol"); + System.clearProperty("otel.traces.exporter"); } } diff --git a/maven-extension/src/test/java/io/opentelemetry/maven/SpanRegistryTest.java b/maven-extension/src/test/java/io/opentelemetry/maven/SpanRegistryTest.java index 864ce8df8..2a01a1754 100644 --- a/maven-extension/src/test/java/io/opentelemetry/maven/SpanRegistryTest.java +++ b/maven-extension/src/test/java/io/opentelemetry/maven/SpanRegistryTest.java @@ -12,11 +12,11 @@ import io.opentelemetry.api.trace.Tracer; import org.junit.jupiter.api.Test; -public class SpanRegistryTest { +class SpanRegistryTest { /** MVND reuses the same Maven process and thus the Span Registry is reused. */ @Test - public void testSpanRegistryReuseWhenUsingMvnDaemon() { + void testSpanRegistryReuseWhenUsingMvnDaemon() { SpanRegistry spanRegistry = new SpanRegistry(); Tracer tracer = OpenTelemetry.noop().getTracer("test"); diff --git a/maven-extension/src/test/java/io/opentelemetry/maven/handler/MojoGoalExecutionHandlerConfigurationTest.java b/maven-extension/src/test/java/io/opentelemetry/maven/handler/MojoGoalExecutionHandlerConfigurationTest.java index 21748cebb..46877575a 100644 --- a/maven-extension/src/test/java/io/opentelemetry/maven/handler/MojoGoalExecutionHandlerConfigurationTest.java +++ b/maven-extension/src/test/java/io/opentelemetry/maven/handler/MojoGoalExecutionHandlerConfigurationTest.java @@ -12,10 +12,9 @@ import java.util.Map; import org.junit.jupiter.api.Test; -public class MojoGoalExecutionHandlerConfigurationTest { - +class MojoGoalExecutionHandlerConfigurationTest { @Test - public void mojoGoalExecutionHandlers() { + void mojoGoalExecutionHandlers() { Map actual = 
MojoGoalExecutionHandlerConfiguration.loadMojoGoalExecutionHandler( OtelExecutionListener.class.getClassLoader()); diff --git a/maven-extension/src/test/java/io/opentelemetry/maven/handler/MojoGoalExecutionHandlerTest.java b/maven-extension/src/test/java/io/opentelemetry/maven/handler/MojoGoalExecutionHandlerTest.java index b193ccdb5..645ecfa0d 100644 --- a/maven-extension/src/test/java/io/opentelemetry/maven/handler/MojoGoalExecutionHandlerTest.java +++ b/maven-extension/src/test/java/io/opentelemetry/maven/handler/MojoGoalExecutionHandlerTest.java @@ -47,10 +47,10 @@ * https://github.com/takari/takari-lifecycle/blob/master/takari-lifecycle-plugin/src/test/java/io/takari/maven/plugins/plugin/PluginDescriptorMojoTest.java */ @SuppressWarnings({"DeduplicateConstants", "deprecation"}) -public class MojoGoalExecutionHandlerTest { +class MojoGoalExecutionHandlerTest { @Test - public void testMavenDeploy() throws Exception { + void testMavenDeploy() throws Exception { String pomXmlPath = "projects/jar/pom.xml"; String mojoGroupId = "org.apache.maven.plugins"; @@ -92,7 +92,7 @@ public void testMavenDeploy() throws Exception { } @Test - public void testSpringBootBuildImage_springboot_1() throws Exception { + void testSpringBootBuildImage_springboot_1() throws Exception { String pomXmlPath = "projects/springboot_1/pom.xml"; String mojoGroupId = "org.springframework.boot"; @@ -136,7 +136,7 @@ public void testSpringBootBuildImage_springboot_1() throws Exception { } @Test - public void testSpringBootBuildImage_springboot_2() throws Exception { + void testSpringBootBuildImage_springboot_2() throws Exception { String pomXmlPath = "projects/springboot_2/pom.xml"; String mojoGroupId = "org.springframework.boot"; @@ -180,7 +180,7 @@ public void testSpringBootBuildImage_springboot_2() throws Exception { } @Test - public void testGoogleJibBuild_jib_1() throws Exception { + void testGoogleJibBuild_jib_1() throws Exception { String pomXmlPath = "projects/jib_1/pom.xml"; 
String mojoGroupId = "com.google.cloud.tools"; @@ -221,7 +221,7 @@ public void testGoogleJibBuild_jib_1() throws Exception { } @Test - public void testGoogleJibBuild_jib_2() throws Exception { + void testGoogleJibBuild_jib_2() throws Exception { String pomXmlPath = "projects/jib_2/pom.xml"; String mojoGroupId = "com.google.cloud.tools"; @@ -262,7 +262,7 @@ public void testGoogleJibBuild_jib_2() throws Exception { } @Test - public void testSnykTest_snyk_1() throws Exception { + void testSnykTest_snyk_1() throws Exception { String pomXmlPath = "projects/snyk_1/pom.xml"; String mojoGroupId = "io.snyk"; @@ -298,7 +298,7 @@ public void testSnykTest_snyk_1() throws Exception { } @Test - public void testSnykMonitor_snyk_1() throws Exception { + void testSnykMonitor_snyk_1() throws Exception { String pomXmlPath = "projects/snyk_1/pom.xml"; String mojoGroupId = "io.snyk"; diff --git a/maven-extension/src/test/java/io/opentelemetry/maven/resources/ResourceComponentProviderTest.java b/maven-extension/src/test/java/io/opentelemetry/maven/resources/ResourceComponentProviderTest.java new file mode 100644 index 000000000..940d8dd83 --- /dev/null +++ b/maven-extension/src/test/java/io/opentelemetry/maven/resources/ResourceComponentProviderTest.java @@ -0,0 +1,43 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.maven.resources; + +import static org.assertj.core.api.Assertions.assertThat; + +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.common.ComponentLoader; +import io.opentelemetry.sdk.autoconfigure.AutoConfiguredOpenTelemetrySdk; +import io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider; +import io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions; +import org.assertj.core.api.InstanceOfAssertFactory; +import org.junit.jupiter.api.Test; + +class ResourceComponentProviderTest { + + @Test + @SuppressWarnings("rawtypes") + void providerIsLoaded() { + Iterable 
providers = + ComponentLoader.forClassLoader(ResourceComponentProviderTest.class.getClassLoader()) + .load(ComponentProvider.class); + assertThat(providers).extracting(ComponentProvider::getName).contains("maven"); + } + + @Test + void endToEnd() { + assertThat( + AutoConfiguredOpenTelemetrySdk.builder() + .build() + .getOpenTelemetrySdk() + .getSdkTracerProvider()) + .extracting("sharedState") + .extracting("resource") + .extracting( + "attributes", + new InstanceOfAssertFactory<>(Attributes.class, OpenTelemetryAssertions::assertThat)) + .containsEntry("telemetry.distro.name", "opentelemetry-maven-extension"); + } +} diff --git a/maven-extension/src/test/resources/declarative-config.yaml b/maven-extension/src/test/resources/declarative-config.yaml new file mode 100644 index 000000000..666fefd36 --- /dev/null +++ b/maven-extension/src/test/resources/declarative-config.yaml @@ -0,0 +1,10 @@ +file_format: "1.0-rc.1" +resource: + detection/development: + detectors: + - maven: +tracer_provider: + processors: + - simple: + exporter: + console: diff --git a/maven-extension/src/test/resources/projects/jib_1/pom.xml b/maven-extension/src/test/resources/projects/jib_1/pom.xml index 36471bc43..d59d51bcc 100644 --- a/maven-extension/src/test/resources/projects/jib_1/pom.xml +++ b/maven-extension/src/test/resources/projects/jib_1/pom.xml @@ -15,7 +15,7 @@ com.google.cloud.tools jib-maven-plugin - 3.4.5 + 3.4.6 docker.io/john/${project.artifactId}:latest diff --git a/maven-extension/src/test/resources/projects/jib_2/pom.xml b/maven-extension/src/test/resources/projects/jib_2/pom.xml index b5bf39fc0..9c34e16df 100644 --- a/maven-extension/src/test/resources/projects/jib_2/pom.xml +++ b/maven-extension/src/test/resources/projects/jib_2/pom.xml @@ -15,7 +15,7 @@ com.google.cloud.tools jib-maven-plugin - 3.4.5 + 3.4.6 gcr.io/my-gcp-project/my-app diff --git a/maven-extension/src/test/resources/projects/springboot_1/pom.xml 
b/maven-extension/src/test/resources/projects/springboot_1/pom.xml index 79d2dbb80..16201b2d9 100644 --- a/maven-extension/src/test/resources/projects/springboot_1/pom.xml +++ b/maven-extension/src/test/resources/projects/springboot_1/pom.xml @@ -6,7 +6,7 @@ org.springframework.boot spring-boot-starter-parent - 3.4.4 + 3.5.6 io.opentelemetry.contrib.maven.test diff --git a/maven-extension/src/test/resources/projects/springboot_2/pom.xml b/maven-extension/src/test/resources/projects/springboot_2/pom.xml index 1bd86fce0..7b594aa8d 100644 --- a/maven-extension/src/test/resources/projects/springboot_2/pom.xml +++ b/maven-extension/src/test/resources/projects/springboot_2/pom.xml @@ -6,7 +6,7 @@ org.springframework.boot spring-boot-starter-parent - 3.4.4 + 3.5.6 io.opentelemetry.contrib.maven.test diff --git a/micrometer-meter-provider/build.gradle.kts b/micrometer-meter-provider/build.gradle.kts index 99b3fb01b..27e61bf1f 100644 --- a/micrometer-meter-provider/build.gradle.kts +++ b/micrometer-meter-provider/build.gradle.kts @@ -20,14 +20,14 @@ dependencies { annotationProcessor("com.google.auto.value:auto-value") compileOnly("com.google.auto.value:auto-value-annotations") - testImplementation("io.micrometer:micrometer-core:1.14.6") + testImplementation("io.micrometer:micrometer-core:1.15.4") } testing { suites { val integrationTest by registering(JvmTestSuite::class) { dependencies { - implementation("io.micrometer:micrometer-registry-prometheus:1.14.6") + implementation("io.micrometer:micrometer-registry-prometheus:1.15.4") } } } diff --git a/micrometer-meter-provider/src/integrationTest/java/io/opentelemetry/contrib/metrics/micrometer/PrometheusIntegrationTest.java b/micrometer-meter-provider/src/integrationTest/java/io/opentelemetry/contrib/metrics/micrometer/PrometheusIntegrationTest.java index 12be80e01..defefe634 100644 --- a/micrometer-meter-provider/src/integrationTest/java/io/opentelemetry/contrib/metrics/micrometer/PrometheusIntegrationTest.java +++ 
b/micrometer-meter-provider/src/integrationTest/java/io/opentelemetry/contrib/metrics/micrometer/PrometheusIntegrationTest.java @@ -48,7 +48,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -public class PrometheusIntegrationTest { +class PrometheusIntegrationTest { private static final AttributeKey KEY1 = AttributeKey.stringKey("key1"); private static final AttributeKey KEY2 = AttributeKey.stringKey("key2"); private static final String VALUE1 = "value1"; diff --git a/micrometer-meter-provider/src/main/java/io/opentelemetry/contrib/metrics/micrometer/MicrometerMeter.java b/micrometer-meter-provider/src/main/java/io/opentelemetry/contrib/metrics/micrometer/MicrometerMeter.java index e8a6b827c..2a080c8e8 100644 --- a/micrometer-meter-provider/src/main/java/io/opentelemetry/contrib/metrics/micrometer/MicrometerMeter.java +++ b/micrometer-meter-provider/src/main/java/io/opentelemetry/contrib/metrics/micrometer/MicrometerMeter.java @@ -5,6 +5,8 @@ package io.opentelemetry.contrib.metrics.micrometer; +import static java.util.Objects.requireNonNull; + import io.opentelemetry.api.metrics.DoubleGaugeBuilder; import io.opentelemetry.api.metrics.DoubleHistogramBuilder; import io.opentelemetry.api.metrics.LongCounterBuilder; @@ -15,7 +17,6 @@ import io.opentelemetry.contrib.metrics.micrometer.internal.instruments.MicrometerLongCounter; import io.opentelemetry.contrib.metrics.micrometer.internal.instruments.MicrometerLongUpDownCounter; import io.opentelemetry.contrib.metrics.micrometer.internal.state.MeterSharedState; -import java.util.Objects; final class MicrometerMeter implements Meter { final MeterSharedState meterSharedState; @@ -26,25 +27,25 @@ final class MicrometerMeter implements Meter { @Override public LongCounterBuilder counterBuilder(String name) { - Objects.requireNonNull(name, "name"); + requireNonNull(name, "name"); return MicrometerLongCounter.builder(meterSharedState, name); } @Override public LongUpDownCounterBuilder 
upDownCounterBuilder(String name) { - Objects.requireNonNull(name, "name"); + requireNonNull(name, "name"); return MicrometerLongUpDownCounter.builder(meterSharedState, name); } @Override public DoubleHistogramBuilder histogramBuilder(String name) { - Objects.requireNonNull(name, "name"); + requireNonNull(name, "name"); return MicrometerDoubleHistogram.builder(meterSharedState, name); } @Override public DoubleGaugeBuilder gaugeBuilder(String name) { - Objects.requireNonNull(name, "name"); + requireNonNull(name, "name"); return MicrometerDoubleGauge.builder(meterSharedState, name); } } diff --git a/micrometer-meter-provider/src/main/java/io/opentelemetry/contrib/metrics/micrometer/MicrometerMeterProvider.java b/micrometer-meter-provider/src/main/java/io/opentelemetry/contrib/metrics/micrometer/MicrometerMeterProvider.java index d76e19a34..2817754e2 100644 --- a/micrometer-meter-provider/src/main/java/io/opentelemetry/contrib/metrics/micrometer/MicrometerMeterProvider.java +++ b/micrometer-meter-provider/src/main/java/io/opentelemetry/contrib/metrics/micrometer/MicrometerMeterProvider.java @@ -5,12 +5,13 @@ package io.opentelemetry.contrib.metrics.micrometer; +import static java.util.Objects.requireNonNull; + import io.micrometer.core.instrument.MeterRegistry; import io.opentelemetry.api.metrics.MeterBuilder; import io.opentelemetry.api.metrics.MeterProvider; import io.opentelemetry.contrib.metrics.micrometer.internal.MemoizingSupplier; import io.opentelemetry.contrib.metrics.micrometer.internal.state.MeterProviderSharedState; -import java.util.Objects; import java.util.function.Supplier; /** @@ -44,13 +45,13 @@ public void close() { /** {@inheritDoc} */ @Override public MeterBuilder meterBuilder(String instrumentationScopeName) { - Objects.requireNonNull(instrumentationScopeName, "instrumentationScopeName"); + requireNonNull(instrumentationScopeName, "instrumentationScopeName"); return new MicrometerMeterBuilder(meterProviderSharedState, instrumentationScopeName); } 
/** Returns a new builder instance for this provider with the specified {@link MeterRegistry}. */ public static MicrometerMeterProviderBuilder builder(MeterRegistry meterRegistry) { - Objects.requireNonNull(meterRegistry, "meterRegistry"); + requireNonNull(meterRegistry, "meterRegistry"); return new MicrometerMeterProviderBuilder(() -> meterRegistry); } @@ -62,7 +63,7 @@ public static MicrometerMeterProviderBuilder builder(MeterRegistry meterRegistry */ public static MicrometerMeterProviderBuilder builder( Supplier meterRegistrySupplier) { - Objects.requireNonNull(meterRegistrySupplier, "meterRegistrySupplier"); + requireNonNull(meterRegistrySupplier, "meterRegistrySupplier"); return new MicrometerMeterProviderBuilder(new MemoizingSupplier<>(meterRegistrySupplier)); } } diff --git a/micrometer-meter-provider/src/main/java/io/opentelemetry/contrib/metrics/micrometer/MicrometerMeterProviderBuilder.java b/micrometer-meter-provider/src/main/java/io/opentelemetry/contrib/metrics/micrometer/MicrometerMeterProviderBuilder.java index 655c8cd32..25bd8b4d3 100644 --- a/micrometer-meter-provider/src/main/java/io/opentelemetry/contrib/metrics/micrometer/MicrometerMeterProviderBuilder.java +++ b/micrometer-meter-provider/src/main/java/io/opentelemetry/contrib/metrics/micrometer/MicrometerMeterProviderBuilder.java @@ -12,7 +12,7 @@ import javax.annotation.Nullable; /** Builder utility class for creating instances of {@link MicrometerMeterProvider}. 
*/ -public class MicrometerMeterProviderBuilder { +public final class MicrometerMeterProviderBuilder { private final Supplier meterRegistrySupplier; @Nullable private CallbackRegistrar callbackRegistrar; diff --git a/micrometer-meter-provider/src/main/java/io/opentelemetry/contrib/metrics/micrometer/ScheduledCallbackRegistrar.java b/micrometer-meter-provider/src/main/java/io/opentelemetry/contrib/metrics/micrometer/ScheduledCallbackRegistrar.java index d9021a234..e3c34b2bb 100644 --- a/micrometer-meter-provider/src/main/java/io/opentelemetry/contrib/metrics/micrometer/ScheduledCallbackRegistrar.java +++ b/micrometer-meter-provider/src/main/java/io/opentelemetry/contrib/metrics/micrometer/ScheduledCallbackRegistrar.java @@ -5,8 +5,9 @@ package io.opentelemetry.contrib.metrics.micrometer; +import static java.util.Objects.requireNonNull; + import java.util.List; -import java.util.Objects; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; @@ -39,7 +40,7 @@ public final class ScheduledCallbackRegistrar implements CallbackRegistrar { public static ScheduledCallbackRegistrarBuilder builder( ScheduledExecutorService scheduledExecutorService) { - Objects.requireNonNull(scheduledExecutorService, "scheduledExecutorService"); + requireNonNull(scheduledExecutorService, "scheduledExecutorService"); return new ScheduledCallbackRegistrarBuilder(scheduledExecutorService); } diff --git a/micrometer-meter-provider/src/main/java/io/opentelemetry/contrib/metrics/micrometer/ScheduledCallbackRegistrarBuilder.java b/micrometer-meter-provider/src/main/java/io/opentelemetry/contrib/metrics/micrometer/ScheduledCallbackRegistrarBuilder.java index de0da54f7..fb7ff0326 100644 --- a/micrometer-meter-provider/src/main/java/io/opentelemetry/contrib/metrics/micrometer/ScheduledCallbackRegistrarBuilder.java +++ 
b/micrometer-meter-provider/src/main/java/io/opentelemetry/contrib/metrics/micrometer/ScheduledCallbackRegistrarBuilder.java @@ -5,9 +5,10 @@ package io.opentelemetry.contrib.metrics.micrometer; +import static java.util.Objects.requireNonNull; + import com.google.errorprone.annotations.CanIgnoreReturnValue; import java.time.Duration; -import java.util.Objects; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; @@ -27,7 +28,7 @@ public final class ScheduledCallbackRegistrarBuilder { /** Sets the period between successive executions of each registered callback */ @CanIgnoreReturnValue public ScheduledCallbackRegistrarBuilder setPeriod(long period, TimeUnit unit) { - Objects.requireNonNull(unit, "unit"); + requireNonNull(unit, "unit"); this.period = period; this.timeUnit = unit; return this; @@ -36,7 +37,7 @@ public ScheduledCallbackRegistrarBuilder setPeriod(long period, TimeUnit unit) { /** Sets the period between successive executions of each registered callback */ @CanIgnoreReturnValue public ScheduledCallbackRegistrarBuilder setPeriod(Duration period) { - Objects.requireNonNull(period, "period"); + requireNonNull(period, "period"); this.period = period.toMillis(); this.timeUnit = TimeUnit.MILLISECONDS; return this; diff --git a/micrometer-meter-provider/src/main/java/io/opentelemetry/contrib/metrics/micrometer/internal/PollingMeterCallbackRegistrar.java b/micrometer-meter-provider/src/main/java/io/opentelemetry/contrib/metrics/micrometer/internal/PollingMeterCallbackRegistrar.java index 935a81009..fd422d7fa 100644 --- a/micrometer-meter-provider/src/main/java/io/opentelemetry/contrib/metrics/micrometer/internal/PollingMeterCallbackRegistrar.java +++ b/micrometer-meter-provider/src/main/java/io/opentelemetry/contrib/metrics/micrometer/internal/PollingMeterCallbackRegistrar.java @@ -88,7 +88,7 @@ static Iterable of(Runnable callback) { private final Runnable callback; - public PollingIterable(Runnable callback) { + 
PollingIterable(Runnable callback) { this.callback = callback; } diff --git a/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/MicrometerMeterProviderTest.java b/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/MicrometerMeterProviderTest.java index f323fbaba..4f1743cbc 100644 --- a/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/MicrometerMeterProviderTest.java +++ b/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/MicrometerMeterProviderTest.java @@ -18,7 +18,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -public class MicrometerMeterProviderTest { +class MicrometerMeterProviderTest { SimpleMeterRegistry meterRegistry; CallbackRegistrar callbackRegistrar; diff --git a/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/MicrometerMeterTest.java b/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/MicrometerMeterTest.java index 56b173e29..c23ef99dc 100644 --- a/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/MicrometerMeterTest.java +++ b/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/MicrometerMeterTest.java @@ -31,7 +31,7 @@ import org.mockito.junit.jupiter.MockitoExtension; @ExtendWith(MockitoExtension.class) -public class MicrometerMeterTest { +class MicrometerMeterTest { SimpleMeterRegistry meterRegistry; List callbacks; diff --git a/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerDoubleCounterTest.java b/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerDoubleCounterTest.java index fd7585605..5b755da5f 100644 --- 
a/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerDoubleCounterTest.java +++ b/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerDoubleCounterTest.java @@ -29,7 +29,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -public class MicrometerDoubleCounterTest { +class MicrometerDoubleCounterTest { SimpleMeterRegistry meterRegistry; diff --git a/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerDoubleGaugeTest.java b/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerDoubleGaugeTest.java index df358a1b9..393479db7 100644 --- a/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerDoubleGaugeTest.java +++ b/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerDoubleGaugeTest.java @@ -26,7 +26,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -public class MicrometerDoubleGaugeTest { +class MicrometerDoubleGaugeTest { SimpleMeterRegistry meterRegistry; List callbacks; diff --git a/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerDoubleHistogramTest.java b/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerDoubleHistogramTest.java index bc88fccc7..dc62b91ee 100644 --- a/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerDoubleHistogramTest.java +++ b/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerDoubleHistogramTest.java @@ -28,7 +28,7 @@ import 
org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -public class MicrometerDoubleHistogramTest { +class MicrometerDoubleHistogramTest { SimpleMeterRegistry meterRegistry; diff --git a/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerDoubleUpDownCounterTest.java b/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerDoubleUpDownCounterTest.java index 57cf8922e..536f18d91 100644 --- a/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerDoubleUpDownCounterTest.java +++ b/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerDoubleUpDownCounterTest.java @@ -28,7 +28,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -public class MicrometerDoubleUpDownCounterTest { +class MicrometerDoubleUpDownCounterTest { SimpleMeterRegistry meterRegistry; diff --git a/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerLongCounterTest.java b/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerLongCounterTest.java index 803cc0952..34423fb14 100644 --- a/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerLongCounterTest.java +++ b/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerLongCounterTest.java @@ -30,7 +30,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -public class MicrometerLongCounterTest { +class MicrometerLongCounterTest { SimpleMeterRegistry meterRegistry; diff --git 
a/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerLongGaugeTest.java b/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerLongGaugeTest.java index f55087dcf..96b56b1a9 100644 --- a/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerLongGaugeTest.java +++ b/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerLongGaugeTest.java @@ -27,7 +27,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -public class MicrometerLongGaugeTest { +class MicrometerLongGaugeTest { SimpleMeterRegistry meterRegistry; List callbacks; diff --git a/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerLongHistogramTest.java b/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerLongHistogramTest.java index 880e7d766..a2651d4b6 100644 --- a/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerLongHistogramTest.java +++ b/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerLongHistogramTest.java @@ -28,7 +28,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -public class MicrometerLongHistogramTest { +class MicrometerLongHistogramTest { SimpleMeterRegistry meterRegistry; diff --git a/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerLongUpDownCounterTest.java b/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerLongUpDownCounterTest.java index e4bce0d99..bc73dcb5d 100644 --- 
a/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerLongUpDownCounterTest.java +++ b/micrometer-meter-provider/src/test/java/io/opentelemetry/contrib/metrics/micrometer/internal/instruments/MicrometerLongUpDownCounterTest.java @@ -29,7 +29,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -public class MicrometerLongUpDownCounterTest { +class MicrometerLongUpDownCounterTest { SimpleMeterRegistry meterRegistry; diff --git a/mise.toml b/mise.toml new file mode 100644 index 000000000..0e27c5494 --- /dev/null +++ b/mise.toml @@ -0,0 +1,12 @@ +[tools] +lychee = "0.20.1" + +[settings] +# Only install tools explicitly defined in the [tools] section above +idiomatic_version_file_enable_tools = [] + +# Windows configuration for file-based tasks +# Based on: https://github.com/jdx/mise/discussions/4461 +windows_executable_extensions = ["sh"] +windows_default_file_shell_args = "bash" +use_file_shell_for_executable_tasks = true diff --git a/noop-api/src/main/java/io/opentelemetry/contrib/noopapi/NoopContextStorageProvider.java b/noop-api/src/main/java/io/opentelemetry/contrib/noopapi/NoopContextStorageProvider.java index 32a33dde2..358730308 100644 --- a/noop-api/src/main/java/io/opentelemetry/contrib/noopapi/NoopContextStorageProvider.java +++ b/noop-api/src/main/java/io/opentelemetry/contrib/noopapi/NoopContextStorageProvider.java @@ -15,7 +15,7 @@ /** * A {@link ContextStorageProvider} that returns a {@link ContextStorage} which is completely no-op. */ -public class NoopContextStorageProvider implements ContextStorageProvider { +public final class NoopContextStorageProvider implements ContextStorageProvider { /** Returns a no-op context storage. 
*/ @Override diff --git a/noop-api/src/main/java/io/opentelemetry/contrib/noopapi/NoopOpenTelemetry.java b/noop-api/src/main/java/io/opentelemetry/contrib/noopapi/NoopOpenTelemetry.java index b84fef8b1..41da9ca7e 100644 --- a/noop-api/src/main/java/io/opentelemetry/contrib/noopapi/NoopOpenTelemetry.java +++ b/noop-api/src/main/java/io/opentelemetry/contrib/noopapi/NoopOpenTelemetry.java @@ -42,7 +42,7 @@ *

The above will succeed both with the {@linkplain OpenTelemetry#noop() default implementation} * and this one, but with this implementation there will be no overhead at all. */ -public class NoopOpenTelemetry implements OpenTelemetry { +public final class NoopOpenTelemetry implements OpenTelemetry { private static final OpenTelemetry INSTANCE = new NoopOpenTelemetry(); diff --git a/opamp-client/README.md b/opamp-client/README.md index c121aa904..a5aa42fb9 100644 --- a/opamp-client/README.md +++ b/opamp-client/README.md @@ -3,8 +3,59 @@ Java implementation of the OpAMP client [spec](https://github.com/open-telemetry/opamp-spec/blob/main/specification.md). +> [!WARNING] +> This is an incubating feature. Breaking changes can happen on a new release without previous +> notice and without backward compatibility guarantees. + +## Usage + +```java +// Initializing it + +RequestService requestService = HttpRequestService.create(OkHttpSender.create("[OPAMP_SERVICE_URL]")); +// RequestService requestService = WebSocketRequestService.create(OkHttpWebSocket.create("[OPAMP_SERVICE_URL]")); // Use this instead to connect to the server via WebSocket. +OpampClient client = + OpampClient.builder() + .putIdentifyingAttribute("service.name", "My service name") + .enableRemoteConfig() + .setRequestService(requestService) + .build( + new OpampClient.Callbacks() { + @Override + public void onConnect(OpampClient client) {} + + @Override + public void onConnectFailed(OpampClient client, @Nullable Throwable throwable) {} + + @Override + public void onErrorResponse(OpampClient client, ServerErrorResponse errorResponse) {} + + @Override + public void onMessage(OpampClient client, MessageData messageData) { + AgentRemoteConfig remoteConfig = messageData.getRemoteConfig(); + if (remoteConfig != null) { + // A remote config was received + + // After applying it... 
+ client.setRemoteConfigStatus( + new RemoteConfigStatus.Builder() + .status(RemoteConfigStatuses.RemoteConfigStatuses_APPLIED) + .build()); + } + } + }); + +// State update +client.setAgentDescription(new AgentDescription.Builder().build()); + +// App shutdown +client.close(); + +``` + ## Component owners - [Cesar Munoz](https://github.com/LikeTheSalad), Elastic +- [Jack Shirazi](https://github.com/jackshirazi), Elastic Learn more about component owners in [component_owners.yml](../.github.amrom.workers.devponent_owners.yml). diff --git a/opamp-client/build.gradle.kts b/opamp-client/build.gradle.kts index c094faf03..8abbbee65 100644 --- a/opamp-client/build.gradle.kts +++ b/opamp-client/build.gradle.kts @@ -1,11 +1,74 @@ +import java.io.FileOutputStream +import java.io.InputStream +import java.net.URL + plugins { id("otel.java-conventions") + id("otel.publish-conventions") + id("otel.animalsniffer-conventions") + id("com.squareup.wire") version "5.4.0" } description = "Client implementation of the OpAMP spec." 
otelJava.moduleName.set("io.opentelemetry.contrib.opamp.client") -java { - sourceCompatibility = JavaVersion.VERSION_1_8 - targetCompatibility = JavaVersion.VERSION_1_8 +dependencies { + implementation("com.squareup.okhttp3:okhttp") + implementation("com.github.f4b6a3:uuid-creator") + implementation("io.opentelemetry:opentelemetry-api") + annotationProcessor("com.google.auto.value:auto-value") + compileOnly("com.google.auto.value:auto-value-annotations") + testImplementation("org.mockito:mockito-inline") + testImplementation("com.google.protobuf:protobuf-java-util") + testImplementation("com.squareup.okhttp3:mockwebserver3") + testImplementation("com.squareup.okhttp3:mockwebserver3-junit5") +} + +val opampProtos = tasks.register("opampProtoDownload") { + group = "opamp" + outputProtosDir.set(project.layout.buildDirectory.dir("opamp/protos")) + downloadedZipFile.set(project.layout.buildDirectory.file("intermediate/opampProtoDownload/release.zip")) + zipUrl.set("https://github.com/open-telemetry/opamp-spec/zipball/v0.14.0") +} + +wire { + java {} + sourcePath { + srcDir(opampProtos) + } +} + +abstract class DownloadAndExtractOpampProtos @Inject constructor( + private val archiveOps: ArchiveOperations, + private val fileOps: FileSystemOperations, +) : DefaultTask() { + + @get:OutputDirectory + abstract val outputProtosDir: DirectoryProperty + + @get:Internal + abstract val downloadedZipFile: RegularFileProperty + + @get:Input + abstract val zipUrl: Property + + @TaskAction + fun execute() { + val url = URL(zipUrl.get()) + downloadedZipFile.get().asFile.parentFile.mkdirs() + + url.openStream().use { input: InputStream -> + downloadedZipFile.get().asFile.outputStream().use { output: FileOutputStream -> + input.copyTo(output) + } + } + + val protos = archiveOps.zipTree(downloadedZipFile).matching { + setIncludes(listOf("**/*.proto")) + } + fileOps.sync { + from(protos.files) + into(outputProtosDir) + } + } } diff --git 
a/opamp-client/src/main/java/io/opentelemetry/opamp/client/OpampClient.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/OpampClient.java new file mode 100644 index 000000000..735fa3340 --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/OpampClient.java @@ -0,0 +1,82 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client; + +import io.opentelemetry.opamp.client.internal.response.MessageData; +import java.io.Closeable; +import javax.annotation.Nullable; +import opamp.proto.AgentDescription; +import opamp.proto.RemoteConfigStatus; +import opamp.proto.ServerErrorResponse; + +public interface OpampClient extends Closeable { + + static OpampClientBuilder builder() { + return new OpampClientBuilder(); + } + + /** + * Sets attributes of the Agent. The attributes will be included in the next outgoing status + * report. This is typically used by Agents which allow their AgentDescription to change + * dynamically while the OpAMPClient is started. May be also called from {@link + * Callbacks#onMessage(OpampClient, MessageData)}. + * + * @param agentDescription The new agent description. + */ + void setAgentDescription(AgentDescription agentDescription); + + /** + * Sets the current remote config status which will be sent in the next agent to server request. + * + * @param remoteConfigStatus The new remote config status. + */ + void setRemoteConfigStatus(RemoteConfigStatus remoteConfigStatus); + + interface Callbacks { + /** + * Called when the connection is successfully established to the Server. For WebSocket clients + * this is called after the handshake is completed without any error. For HTTP clients this is + * called for any request if the response status is OK. + * + * @param client The client that's connected. + */ + void onConnect(OpampClient client); + + /** + * Called when the connection to the Server cannot be established. 
May also be called if the + * connection is lost and reconnection attempt fails. + * + * @param client The client that failed to connect. + * @param throwable The exception. + */ + void onConnectFailed(OpampClient client, @Nullable Throwable throwable); + + /** + * Called when the Server reports an error in response to some previously sent request. Useful + * for logging purposes. The Agent should not attempt to process the error by reconnecting or + * retrying previous operations. The client handles the ErrorResponse_UNAVAILABLE case + * internally by performing retries as necessary. + * + * @param client The client that received an error response. + * @param errorResponse The error returned by the Server. + */ + void onErrorResponse(OpampClient client, ServerErrorResponse errorResponse); + + /** + * Called when the Agent receives a message that needs processing. See {@link MessageData} + * definition for the data that may be available for processing. During onMessage execution the + * {@link OpampClient} functions that change the status of the client may be called, e.g. if + * RemoteConfig is processed then {@link #setRemoteConfigStatus(opamp.proto.RemoteConfigStatus)} + * should be called to reflect the processing result. These functions may also be called after + * onMessage returns. This is advisable if processing can take a long time. In that case + * returning quickly is preferable to avoid blocking the {@link OpampClient}. + * + * @param client The client that received a message. + * @param messageData The server response data that needs processing. 
+ */ + void onMessage(OpampClient client, MessageData messageData); + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/OpampClientBuilder.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/OpampClientBuilder.java new file mode 100644 index 000000000..d6af850fa --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/OpampClientBuilder.java @@ -0,0 +1,483 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client; + +import com.github.f4b6a3.uuid.UuidCreator; +import com.google.errorprone.annotations.CanIgnoreReturnValue; +import io.opentelemetry.opamp.client.internal.connectivity.http.OkHttpSender; +import io.opentelemetry.opamp.client.internal.impl.OpampClientImpl; +import io.opentelemetry.opamp.client.internal.impl.OpampClientState; +import io.opentelemetry.opamp.client.internal.request.service.HttpRequestService; +import io.opentelemetry.opamp.client.internal.request.service.WebSocketRequestService; +import io.opentelemetry.opamp.client.internal.state.State; +import io.opentelemetry.opamp.client.request.service.RequestService; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import javax.annotation.Nullable; +import opamp.proto.AgentCapabilities; +import opamp.proto.AgentDescription; +import opamp.proto.AnyValue; +import opamp.proto.ArrayValue; +import opamp.proto.KeyValue; +import opamp.proto.RemoteConfigStatus; + +/** Builds an {@link OpampClient} instance. 
*/ +public final class OpampClientBuilder { + private final Map identifyingAttributes = new HashMap<>(); + private final Map nonIdentifyingAttributes = new HashMap<>(); + private long capabilities = 0; + private RequestService service = + HttpRequestService.create(OkHttpSender.create("http://localhost:4320/v1/opamp")); + @Nullable private byte[] instanceUid; + @Nullable private State.EffectiveConfig effectiveConfigState; + + OpampClientBuilder() {} + + /** + * Sets an implementation of a {@link RequestService} to handle the request's sending process. + * There are 2 possible options, either {@link HttpRequestService} to use HTTP, or {@link + * WebSocketRequestService} to use WebSocket. + * + * @param service The request service implementation. + * @return this + */ + @CanIgnoreReturnValue + public OpampClientBuilder setRequestService(RequestService service) { + this.service = service; + return this; + } + + /** + * Sets the Agent's instance_uid + * value. A random one is generated by default. + * + * @param value The AgentToServer.instance_uid value. + * @return this + */ + @CanIgnoreReturnValue + public OpampClientBuilder setInstanceUid(byte[] value) { + this.instanceUid = value; + return this; + } + + /** + * Puts a string attribute into the identifying_attributes + * field. + * + * @param key The attribute key. + * @param value The attribute value. + * @return this + */ + @CanIgnoreReturnValue + public OpampClientBuilder putIdentifyingAttribute(String key, String value) { + identifyingAttributes.put(key, createStringValue(value)); + return this; + } + + /** + * Puts a boolean attribute into the identifying_attributes + * field. + * + * @param key The attribute key. + * @param value The attribute value. 
+ * @return this + */ + @CanIgnoreReturnValue + public OpampClientBuilder putIdentifyingAttribute(String key, boolean value) { + identifyingAttributes.put(key, createBooleanValue(value)); + return this; + } + + /** + * Puts a long (proto int64) attribute into the identifying_attributes + * field. + * + * @param key The attribute key. + * @param value The attribute value. + * @return this + */ + @CanIgnoreReturnValue + public OpampClientBuilder putIdentifyingAttribute(String key, long value) { + identifyingAttributes.put(key, createLongValue(value)); + return this; + } + + /** + * Puts a double attribute into the identifying_attributes + * field. + * + * @param key The attribute key. + * @param value The attribute value. + * @return this + */ + @CanIgnoreReturnValue + public OpampClientBuilder putIdentifyingAttribute(String key, double value) { + identifyingAttributes.put(key, createDoubleValue(value)); + return this; + } + + /** + * Puts a string array attribute into the identifying_attributes + * field. + * + * @param key The attribute key. + * @param values The attribute values. + * @return this + */ + @CanIgnoreReturnValue + public OpampClientBuilder putIdentifyingAttribute(String key, String... values) { + if (values == null) { + return this; + } + identifyingAttributes.put(key, createArrayValue(createStringValueList(values))); + return this; + } + + /** + * Puts a boolean array attribute into the identifying_attributes + * field. + * + * @param key The attribute key. + * @param values The attribute values. + * @return this + */ + @CanIgnoreReturnValue + public OpampClientBuilder putIdentifyingAttribute(String key, boolean... values) { + if (values == null) { + return this; + } + identifyingAttributes.put(key, createArrayValue(createBooleanValueList(values))); + return this; + } + + /** + * Puts a long (proto int64) array attribute into the identifying_attributes + * field. + * + * @param key The attribute key. + * @param values The attribute values. 
+ * @return this + */ + @CanIgnoreReturnValue + public OpampClientBuilder putIdentifyingAttribute(String key, long... values) { + if (values == null) { + return this; + } + identifyingAttributes.put(key, createArrayValue(createLongValueList(values))); + return this; + } + + /** + * Puts a double array attribute into the identifying_attributes + * field. + * + * @param key The attribute key. + * @param values The attribute values. + * @return this + */ + @CanIgnoreReturnValue + public OpampClientBuilder putIdentifyingAttribute(String key, double... values) { + if (values == null) { + return this; + } + identifyingAttributes.put(key, createArrayValue(createDoubleValueList(values))); + return this; + } + + /** + * Puts a string attribute into the non_identifying_attributes + * field. + * + * @param key The attribute key + * @param value The attribute value. + * @return this + */ + @CanIgnoreReturnValue + public OpampClientBuilder putNonIdentifyingAttribute(String key, String value) { + nonIdentifyingAttributes.put(key, createStringValue(value)); + return this; + } + + /** + * Puts a boolean attribute into the non_identifying_attributes + * field. + * + * @param key The attribute key + * @param value The attribute value. + * @return this + */ + @CanIgnoreReturnValue + public OpampClientBuilder putNonIdentifyingAttribute(String key, boolean value) { + nonIdentifyingAttributes.put(key, createBooleanValue(value)); + return this; + } + + /** + * Puts a long (proto int64) attribute into the non_identifying_attributes + * field. + * + * @param key The attribute key + * @param value The attribute value. + * @return this + */ + @CanIgnoreReturnValue + public OpampClientBuilder putNonIdentifyingAttribute(String key, long value) { + nonIdentifyingAttributes.put(key, createLongValue(value)); + return this; + } + + /** + * Puts a double attribute into the non_identifying_attributes + * field. + * + * @param key The attribute key + * @param value The attribute value. 
+ * @return this + */ + @CanIgnoreReturnValue + public OpampClientBuilder putNonIdentifyingAttribute(String key, double value) { + nonIdentifyingAttributes.put(key, createDoubleValue(value)); + return this; + } + + /** + * Puts a string array attribute into the non_identifying_attributes + * field. + * + * @param key The attribute key + * @param values The attribute values. + * @return this + */ + @CanIgnoreReturnValue + public OpampClientBuilder putNonIdentifyingAttribute(String key, String... values) { + if (values == null) { + return this; + } + nonIdentifyingAttributes.put(key, createArrayValue(createStringValueList(values))); + return this; + } + + /** + * Puts a boolean array attribute into the non_identifying_attributes + * field. + * + * @param key The attribute key + * @param values The attribute values. + * @return this + */ + @CanIgnoreReturnValue + public OpampClientBuilder putNonIdentifyingAttribute(String key, boolean... values) { + if (values == null) { + return this; + } + nonIdentifyingAttributes.put(key, createArrayValue(createBooleanValueList(values))); + return this; + } + + /** + * Puts a long (proto int64) array attribute into the non_identifying_attributes + * field. + * + * @param key The attribute key + * @param values The attribute values. + * @return this + */ + @CanIgnoreReturnValue + public OpampClientBuilder putNonIdentifyingAttribute(String key, long... values) { + if (values == null) { + return this; + } + nonIdentifyingAttributes.put(key, createArrayValue(createLongValueList(values))); + return this; + } + + /** + * Puts a double array attribute into the non_identifying_attributes + * field. + * + * @param key The attribute key + * @param values The attribute values. + * @return this + */ + @CanIgnoreReturnValue + public OpampClientBuilder putNonIdentifyingAttribute(String key, double... 
values) { + if (values == null) { + return this; + } + nonIdentifyingAttributes.put(key, createArrayValue(createDoubleValueList(values))); + return this; + } + + /** + * Adds the AcceptsRemoteConfig and ReportsRemoteConfig capabilities to the Client so that the + * Server can offer remote config values as explained here. + * + * @return this + */ + @CanIgnoreReturnValue + public OpampClientBuilder enableRemoteConfig() { + capabilities = + capabilities + | AgentCapabilities.AgentCapabilities_AcceptsRemoteConfig.getValue() + | AgentCapabilities.AgentCapabilities_ReportsRemoteConfig.getValue(); + return this; + } + + /** + * Adds the ReportsEffectiveConfig capability to the Client so that the Server expects the + * Client's effective config report, as explained here. + * + * @return this + */ + @CanIgnoreReturnValue + public OpampClientBuilder enableEffectiveConfigReporting() { + capabilities = + capabilities | AgentCapabilities.AgentCapabilities_ReportsEffectiveConfig.getValue(); + return this; + } + + /** + * Sets the effective config state implementation. It should call {@link + * State.EffectiveConfig#notifyUpdate()} whenever it has changes that have not been sent to the + * server. + * + * @param effectiveConfigState The state implementation. 
+ * @return this + */ + @CanIgnoreReturnValue + public OpampClientBuilder setEffectiveConfigState(State.EffectiveConfig effectiveConfigState) { + this.effectiveConfigState = effectiveConfigState; + return this; + } + + public OpampClient build(OpampClient.Callbacks callbacks) { + List protoIdentifyingAttributes = new ArrayList<>(); + List protoNonIdentifyingAttributes = new ArrayList<>(); + identifyingAttributes.forEach( + (key, value) -> protoIdentifyingAttributes.add(createKeyValue(key, value))); + nonIdentifyingAttributes.forEach( + (key, value) -> protoNonIdentifyingAttributes.add(createKeyValue(key, value))); + if (instanceUid == null) { + instanceUid = createRandomInstanceUid(); + } + if (effectiveConfigState == null) { + effectiveConfigState = createEffectiveConfigNoop(); + } + OpampClientState state = + new OpampClientState( + new State.RemoteConfigStatus(new RemoteConfigStatus.Builder().build()), + new State.SequenceNum(1L), + new State.AgentDescription( + new AgentDescription.Builder() + .identifying_attributes(protoIdentifyingAttributes) + .non_identifying_attributes(protoNonIdentifyingAttributes) + .build()), + new State.Capabilities(capabilities), + new State.InstanceUid(instanceUid), + new State.Flags(0L), + effectiveConfigState); + return OpampClientImpl.create(service, state, callbacks); + } + + private static State.EffectiveConfig createEffectiveConfigNoop() { + return new State.EffectiveConfig() { + @Nullable + @Override + public opamp.proto.EffectiveConfig get() { + return null; + } + }; + } + + private static AnyValue createStringValue(String value) { + return new AnyValue.Builder().string_value(value).build(); + } + + private static AnyValue createBooleanValue(boolean value) { + return new AnyValue.Builder().bool_value(value).build(); + } + + private static AnyValue createLongValue(long value) { + return new AnyValue.Builder().int_value(value).build(); + } + + private static AnyValue createDoubleValue(double value) { + return new 
AnyValue.Builder().double_value(value).build(); + } + + private static List createStringValueList(String[] values) { + List anyValues = new ArrayList<>(); + for (String value : values) { + anyValues.add(createStringValue(value)); + } + return anyValues; + } + + private static List createBooleanValueList(boolean[] values) { + List anyValues = new ArrayList<>(); + for (boolean value : values) { + anyValues.add(createBooleanValue(value)); + } + return anyValues; + } + + private static List createLongValueList(long[] values) { + List anyValues = new ArrayList<>(); + for (long value : values) { + anyValues.add(createLongValue(value)); + } + return anyValues; + } + + private static List createDoubleValueList(double[] values) { + List anyValues = new ArrayList<>(); + for (double value : values) { + anyValues.add(createDoubleValue(value)); + } + return anyValues; + } + + private static AnyValue createArrayValue(List values) { + return new AnyValue.Builder() + .array_value(new ArrayValue.Builder().values(values).build()) + .build(); + } + + private static KeyValue createKeyValue(String key, AnyValue value) { + return new KeyValue.Builder().key(key).value(value).build(); + } + + public static byte[] createRandomInstanceUid() { + UUID uuid = UuidCreator.getTimeOrderedEpoch(); + ByteBuffer buffer = ByteBuffer.allocate(16); + buffer.putLong(uuid.getMostSignificantBits()); + buffer.putLong(uuid.getLeastSignificantBits()); + return buffer.array(); + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/connectivity/http/HttpErrorException.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/connectivity/http/HttpErrorException.java new file mode 100644 index 000000000..c1104118c --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/connectivity/http/HttpErrorException.java @@ -0,0 +1,27 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package 
io.opentelemetry.opamp.client.internal.connectivity.http; + +public class HttpErrorException extends Exception { + private final int errorCode; + + private static final long serialVersionUID = 1L; + + public int getErrorCode() { + return errorCode; + } + + /** + * Constructs an HTTP error related exception. + * + * @param errorCode The HTTP error code. + * @param message The HTTP error message associated with the code. + */ + public HttpErrorException(int errorCode, String message) { + super(message); + this.errorCode = errorCode; + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/connectivity/http/HttpSender.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/connectivity/http/HttpSender.java new file mode 100644 index 000000000..704d172ac --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/connectivity/http/HttpSender.java @@ -0,0 +1,31 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.connectivity.http; + +import java.io.Closeable; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.concurrent.CompletableFuture; + +public interface HttpSender { + + CompletableFuture send(BodyWriter writer, int contentLength); + + interface BodyWriter { + void writeTo(OutputStream outputStream) throws IOException; + } + + interface Response extends Closeable { + int statusCode(); + + String statusMessage(); + + InputStream bodyInputStream(); + + String getHeader(String name); + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/connectivity/http/OkHttpSender.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/connectivity/http/OkHttpSender.java new file mode 100644 index 000000000..1add016fd --- /dev/null +++ 
b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/connectivity/http/OkHttpSender.java @@ -0,0 +1,134 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.connectivity.http; + +import io.opentelemetry.api.internal.InstrumentationUtil; +import java.io.IOException; +import java.io.InputStream; +import java.util.concurrent.CompletableFuture; +import javax.annotation.Nullable; +import okhttp3.Call; +import okhttp3.Callback; +import okhttp3.MediaType; +import okhttp3.OkHttpClient; +import okhttp3.Request; +import okhttp3.RequestBody; +import okio.BufferedSink; + +public final class OkHttpSender implements HttpSender { + private final OkHttpClient client; + private final String url; + + public static OkHttpSender create(String url) { + return create(url, new OkHttpClient()); + } + + public static OkHttpSender create(String url, OkHttpClient client) { + return new OkHttpSender(url, client); + } + + private static final String CONTENT_TYPE = "application/x-protobuf"; + private static final MediaType MEDIA_TYPE = MediaType.parse(CONTENT_TYPE); + + private OkHttpSender(String url, OkHttpClient client) { + this.url = url; + this.client = client; + } + + @Override + public CompletableFuture send(BodyWriter writer, int contentLength) { + CompletableFuture future = new CompletableFuture<>(); + okhttp3.Request.Builder builder = new okhttp3.Request.Builder().url(url); + builder.addHeader("Content-Type", CONTENT_TYPE); + + RequestBody body = new RawRequestBody(writer, contentLength, MEDIA_TYPE); + builder.post(body); + + // By suppressing instrumentations, we prevent automatic instrumentations for the okhttp request + // that polls the opamp server. 
+ InstrumentationUtil.suppressInstrumentation(() -> doSendRequest(builder.build(), future)); + + return future; + } + + private void doSendRequest(Request request, CompletableFuture future) { + client + .newCall(request) + .enqueue( + new Callback() { + @Override + public void onResponse(Call call, okhttp3.Response response) { + future.complete(new OkHttpResponse(response)); + } + + @Override + public void onFailure(Call call, IOException e) { + future.completeExceptionally(e); + } + }); + } + + private static class OkHttpResponse implements Response { + private final okhttp3.Response response; + + private OkHttpResponse(okhttp3.Response response) { + this.response = response; + } + + @Override + public int statusCode() { + return response.code(); + } + + @Override + public String statusMessage() { + return response.message(); + } + + @Override + public InputStream bodyInputStream() { + return response.body().byteStream(); + } + + @Override + public String getHeader(String name) { + return response.headers().get(name); + } + + @Override + public void close() { + response.close(); + } + } + + private static class RawRequestBody extends RequestBody { + private final BodyWriter writer; + private final int contentLength; + private final MediaType contentType; + + private RawRequestBody(BodyWriter writer, int contentLength, MediaType contentType) { + this.writer = writer; + this.contentLength = contentLength; + this.contentType = contentType; + } + + @Nullable + @Override + public MediaType contentType() { + return contentType; + } + + @Override + public long contentLength() { + return contentLength; + } + + @Override + public void writeTo(BufferedSink bufferedSink) throws IOException { + writer.writeTo(bufferedSink.outputStream()); + } + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/connectivity/http/RetryAfterParser.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/connectivity/http/RetryAfterParser.java new 
file mode 100644 index 000000000..6cc2d1e3b --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/connectivity/http/RetryAfterParser.java @@ -0,0 +1,49 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.connectivity.http; + +import io.opentelemetry.opamp.client.internal.tools.SystemTime; +import java.time.Duration; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.util.Locale; +import java.util.Optional; +import java.util.regex.Pattern; + +public final class RetryAfterParser { + private final SystemTime systemTime; + private static final Pattern SECONDS_PATTERN = Pattern.compile("\\d+"); + private static final Pattern DATE_PATTERN = + Pattern.compile( + "[A-Za-z]{3}, [0-3][0-9] [A-Za-z]{3} [0-9]{4} [0-2][0-9]:[0-5][0-9]:[0-5][0-9] GMT"); + private static final DateTimeFormatter DATE_FORMAT = + DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss z", Locale.US); + + public static RetryAfterParser getInstance() { + return new RetryAfterParser(SystemTime.getInstance()); + } + + RetryAfterParser(SystemTime systemTime) { + this.systemTime = systemTime; + } + + public Optional tryParse(String value) { + Duration duration = null; + if (SECONDS_PATTERN.matcher(value).matches()) { + duration = Duration.ofSeconds(Long.parseLong(value)); + } else if (DATE_PATTERN.matcher(value).matches()) { + long difference = toMilliseconds(value) - systemTime.getCurrentTimeMillis(); + if (difference > 0) { + duration = Duration.ofMillis(difference); + } + } + return Optional.ofNullable(duration); + } + + private static long toMilliseconds(String value) { + return ZonedDateTime.parse(value, DATE_FORMAT).toInstant().toEpochMilli(); + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/connectivity/http/package-info.java 
b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/connectivity/http/package-info.java new file mode 100644 index 000000000..7bbcdb627 --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/connectivity/http/package-info.java @@ -0,0 +1,9 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +@ParametersAreNonnullByDefault +package io.opentelemetry.opamp.client.internal.connectivity.http; + +import javax.annotation.ParametersAreNonnullByDefault; diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/connectivity/websocket/OkHttpWebSocket.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/connectivity/websocket/OkHttpWebSocket.java new file mode 100644 index 000000000..8ff386251 --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/connectivity/websocket/OkHttpWebSocket.java @@ -0,0 +1,116 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.connectivity.websocket; + +import static java.util.Objects.requireNonNull; + +import java.util.concurrent.atomic.AtomicReference; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import okhttp3.OkHttpClient; +import okhttp3.Response; +import okhttp3.WebSocketListener; +import okio.ByteString; + +public class OkHttpWebSocket implements WebSocket { + private final String url; + private final OkHttpClient client; + private final AtomicReference status = new AtomicReference<>(Status.NOT_RUNNING); + private final AtomicReference webSocket = new AtomicReference<>(); + + public static OkHttpWebSocket create(String url) { + return create(url, new OkHttpClient()); + } + + public static OkHttpWebSocket create(String url, OkHttpClient client) { + return new OkHttpWebSocket(url, client); + } + + private OkHttpWebSocket(String url, OkHttpClient client) { + this.url = 
url; + this.client = client; + } + + @Override + public void open(Listener listener) { + if (status.compareAndSet(Status.NOT_RUNNING, Status.STARTING)) { + okhttp3.Request request = new okhttp3.Request.Builder().url(url).build(); + webSocket.set(client.newWebSocket(request, new ListenerAdapter(listener))); + } + } + + @Override + public boolean send(byte[] request) { + if (status.get() != Status.RUNNING) { + return false; + } + return getWebSocket().send(ByteString.of(request)); + } + + @Override + public void close(int code, @Nullable String reason) { + if (status.compareAndSet(Status.RUNNING, Status.CLOSING)) { + try { + if (!getWebSocket().close(code, reason)) { + status.set(Status.NOT_RUNNING); + } + } catch (IllegalArgumentException e) { + status.set(Status.RUNNING); + // Re-throwing as this error happens due to a caller error. + throw e; + } + } + } + + private okhttp3.WebSocket getWebSocket() { + return requireNonNull(webSocket.get()); + } + + private class ListenerAdapter extends WebSocketListener { + private final Listener delegate; + + private ListenerAdapter(Listener delegate) { + this.delegate = delegate; + } + + @Override + public void onOpen(@Nonnull okhttp3.WebSocket webSocket, @Nonnull Response response) { + status.set(Status.RUNNING); + delegate.onOpen(); + } + + @Override + public void onClosing(@Nonnull okhttp3.WebSocket webSocket, int code, @Nonnull String reason) { + status.set(Status.CLOSING); + delegate.onClosing(); + } + + @Override + public void onClosed(@Nonnull okhttp3.WebSocket webSocket, int code, @Nonnull String reason) { + status.set(Status.NOT_RUNNING); + delegate.onClosed(); + } + + @Override + public void onFailure( + @Nonnull okhttp3.WebSocket webSocket, @Nonnull Throwable t, @Nullable Response response) { + status.set(Status.NOT_RUNNING); + delegate.onFailure(t); + } + + @Override + public void onMessage(@Nonnull okhttp3.WebSocket webSocket, @Nonnull ByteString bytes) { + delegate.onMessage(bytes.toByteArray()); + } + } + + enum 
Status { + NOT_RUNNING, + STARTING, + CLOSING, + RUNNING + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/connectivity/websocket/WebSocket.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/connectivity/websocket/WebSocket.java new file mode 100644 index 000000000..56978bbef --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/connectivity/websocket/WebSocket.java @@ -0,0 +1,72 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.connectivity.websocket; + +import javax.annotation.Nullable; + +public interface WebSocket { + /** + * Starts the websocket connection if it's not yet started or if it has been closed. + * + * @param listener Will receive events from the websocket connection. + */ + void open(Listener listener); + + /** + * Stops the websocket connection if running. Nothing will happen if it's already stopped. + * + * @param code Status code as defined by Section 7.4 of RFC 6455 + * @param reason Reason for shutting down, as explained in Section 5.5.1 of RFC + * 6455 + */ + void close(int code, @Nullable String reason); + + /** + * Sends a message via the websocket connection. + * + * @param request The message payload. + * @return {@code false} If the message can't be dispatched for any reason, whether the websocket + * isn't running, or the connection isn't established, or it's terminated. {@code true} if the + * message can get sent. Returning {@code true} doesn't guarantee that the message will arrive + * at the remote peer. + */ + boolean send(byte[] request); + + interface Listener { + + /** + * Called when the websocket connection is successfully established with the remote peer. The + * client may start sending messages after this method is called. + */ + void onOpen(); + + /** + * Called when the closing handshake has started. 
No further messages will be sent after this + * method call. + */ + void onClosing(); + + /** Called when the connection is terminated and no further messages can be transmitted. */ + void onClosed(); + + /** + * Called when receiving a message from the remote peer. + * + * @param data The payload sent by the remote peer. + */ + void onMessage(byte[] data); + + /** + * Called when the connection is closed or cannot be established due to an error. No messages + * can be transmitted after this method is called. + * + * @param t The error. + */ + void onFailure(Throwable t); + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/OpampClientImpl.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/OpampClientImpl.java new file mode 100644 index 000000000..1441093d7 --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/OpampClientImpl.java @@ -0,0 +1,255 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.impl; + +import io.opentelemetry.opamp.client.OpampClient; +import io.opentelemetry.opamp.client.internal.impl.recipe.AgentToServerAppenders; +import io.opentelemetry.opamp.client.internal.impl.recipe.RecipeManager; +import io.opentelemetry.opamp.client.internal.impl.recipe.RequestRecipe; +import io.opentelemetry.opamp.client.internal.impl.recipe.appenders.AgentDescriptionAppender; +import io.opentelemetry.opamp.client.internal.impl.recipe.appenders.AgentDisconnectAppender; +import io.opentelemetry.opamp.client.internal.impl.recipe.appenders.CapabilitiesAppender; +import io.opentelemetry.opamp.client.internal.impl.recipe.appenders.EffectiveConfigAppender; +import io.opentelemetry.opamp.client.internal.impl.recipe.appenders.FlagsAppender; +import io.opentelemetry.opamp.client.internal.impl.recipe.appenders.InstanceUidAppender; +import 
io.opentelemetry.opamp.client.internal.impl.recipe.appenders.RemoteConfigStatusAppender; +import io.opentelemetry.opamp.client.internal.impl.recipe.appenders.SequenceNumberAppender; +import io.opentelemetry.opamp.client.internal.request.Field; +import io.opentelemetry.opamp.client.internal.request.Request; +import io.opentelemetry.opamp.client.internal.response.MessageData; +import io.opentelemetry.opamp.client.internal.response.OpampServerResponseException; +import io.opentelemetry.opamp.client.internal.response.Response; +import io.opentelemetry.opamp.client.internal.state.ObservableState; +import io.opentelemetry.opamp.client.internal.state.State; +import io.opentelemetry.opamp.client.request.service.RequestService; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Supplier; +import javax.annotation.Nonnull; +import okio.ByteString; +import opamp.proto.AgentDescription; +import opamp.proto.AgentToServer; +import opamp.proto.RemoteConfigStatus; +import opamp.proto.ServerErrorResponse; +import opamp.proto.ServerToAgent; +import opamp.proto.ServerToAgentFlags; + +/** + * This class is internal and is hence not for public use. Its APIs are unstable and can change at + * any time. + */ +public final class OpampClientImpl + implements OpampClient, ObservableState.Listener, RequestService.Callback, Supplier { + private final RequestService requestService; + private final AgentToServerAppenders appenders; + private final OpampClientState state; + private final RecipeManager recipeManager; + private final AtomicBoolean hasStopped = new AtomicBoolean(false); + private final Callbacks callbacks; + + /** Fields that must always be sent. */ + private static final List REQUIRED_FIELDS; + + /** + * Fields that should only be sent in the first message and then omitted in following messages, + * unless their value changes or the server requests a full message. + * + *

Refer to the + * docs for more details. + */ + private static final List COMPRESSABLE_FIELDS; + + static { + // Required fields init + List constantFields = new ArrayList<>(); + constantFields.add(Field.INSTANCE_UID); + constantFields.add(Field.SEQUENCE_NUM); + constantFields.add(Field.CAPABILITIES); + REQUIRED_FIELDS = Collections.unmodifiableList(constantFields); + + // Compressable fields init + List compressableFields = new ArrayList<>(); + compressableFields.add(Field.AGENT_DESCRIPTION); + compressableFields.add(Field.EFFECTIVE_CONFIG); + compressableFields.add(Field.REMOTE_CONFIG_STATUS); + COMPRESSABLE_FIELDS = Collections.unmodifiableList(compressableFields); + } + + public static OpampClientImpl create( + RequestService requestService, OpampClientState state, Callbacks callbacks) { + AgentToServerAppenders appenders = + new AgentToServerAppenders( + AgentDescriptionAppender.create(state.agentDescription), + EffectiveConfigAppender.create(state.effectiveConfig), + RemoteConfigStatusAppender.create(state.remoteConfigStatus), + SequenceNumberAppender.create(state.sequenceNum), + CapabilitiesAppender.create(state.capabilities), + InstanceUidAppender.create(state.instanceUid), + FlagsAppender.create(state.flags), + AgentDisconnectAppender.create()); + OpampClientImpl client = + new OpampClientImpl( + requestService, appenders, state, RecipeManager.create(REQUIRED_FIELDS), callbacks); + + // Start + requestService.start(client, client); + client.disableCompression(); + client.startObservingStateChange(); + requestService.sendRequest(); + + return client; + } + + private OpampClientImpl( + RequestService requestService, + AgentToServerAppenders appenders, + OpampClientState state, + RecipeManager recipeManager, + Callbacks callbacks) { + this.requestService = requestService; + this.appenders = appenders; + this.state = state; + this.recipeManager = recipeManager; + this.callbacks = callbacks; + } + + @Override + public void close() { + if 
(hasStopped.compareAndSet(false, true)) { + stopObservingStateChange(); + prepareDisconnectRequest(); + requestService.stop(); + } + } + + @Override + public void setAgentDescription(@Nonnull AgentDescription agentDescription) { + if (!state.agentDescription.get().equals(agentDescription)) { + state.agentDescription.set(agentDescription); + addFieldAndSend(Field.AGENT_DESCRIPTION); + } + } + + @Override + public void setRemoteConfigStatus(@Nonnull RemoteConfigStatus remoteConfigStatus) { + if (!state.remoteConfigStatus.get().equals(remoteConfigStatus)) { + state.remoteConfigStatus.set(remoteConfigStatus); + addFieldAndSend(Field.REMOTE_CONFIG_STATUS); + } + } + + @Override + public void onConnectionSuccess() { + callbacks.onConnect(this); + } + + @Override + public void onConnectionFailed(Throwable throwable) { + callbacks.onConnectFailed(this, throwable); + } + + @Override + public void onRequestSuccess(Response response) { + if (response == null) { + return; + } + + handleResponsePayload(response.getServerToAgent()); + } + + @Override + public void onRequestFailed(Throwable throwable) { + preserveFailedRequestRecipe(); + if (throwable instanceof OpampServerResponseException) { + ServerErrorResponse errorResponse = ((OpampServerResponseException) throwable).errorResponse; + callbacks.onErrorResponse(this, errorResponse); + } + } + + private void preserveFailedRequestRecipe() { + RequestRecipe previous = recipeManager.previous(); + if (previous != null) { + recipeManager.next().merge(previous); + } + } + + private void handleResponsePayload(ServerToAgent response) { + int reportFullState = ServerToAgentFlags.ServerToAgentFlags_ReportFullState.getValue(); + if ((response.flags & reportFullState) == reportFullState) { + disableCompression(); + } + handleAgentIdentification(response); + + boolean notifyOnMessage = false; + MessageData.Builder messageBuilder = MessageData.builder(); + + if (response.remote_config != null) { + notifyOnMessage = true; + 
messageBuilder.setRemoteConfig(response.remote_config); + } + + if (notifyOnMessage) { + callbacks.onMessage(this, messageBuilder.build()); + } + } + + private void handleAgentIdentification(ServerToAgent response) { + if (response.agent_identification != null) { + ByteString newInstanceUid = response.agent_identification.new_instance_uid; + if (newInstanceUid.size() > 0) { + state.instanceUid.set(newInstanceUid.toByteArray()); + } + } + } + + private void disableCompression() { + recipeManager.next().addAllFields(COMPRESSABLE_FIELDS); + } + + private void prepareDisconnectRequest() { + recipeManager.next().addField(Field.AGENT_DISCONNECT); + } + + @Override + public Request get() { + AgentToServer.Builder builder = new AgentToServer.Builder(); + for (Field field : recipeManager.next().build().getFields()) { + appenders.getForField(field).appendTo(builder); + } + Request request = Request.create(builder.build()); + state.sequenceNum.increment(); + return request; + } + + private void startObservingStateChange() { + for (State state : state.getAll()) { + if (state instanceof ObservableState) { + ((ObservableState) state).addListener(this); + } + } + } + + private void stopObservingStateChange() { + for (State state : state.getAll()) { + if (state instanceof ObservableState) { + ((ObservableState) state).removeListener(this); + } + } + } + + @Override + public void onStateUpdate(Field type) { + addFieldAndSend(type); + } + + private void addFieldAndSend(Field field) { + recipeManager.next().addField(field); + requestService.sendRequest(); + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/OpampClientState.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/OpampClientState.java new file mode 100644 index 000000000..d8b26beee --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/OpampClientState.java @@ -0,0 +1,58 @@ +/* + * Copyright The OpenTelemetry Authors + * 
SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.impl; + +import io.opentelemetry.opamp.client.internal.state.State; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * This class is internal and is hence not for public use. Its APIs are unstable and can change at + * any time. + */ +public final class OpampClientState { + public final State.RemoteConfigStatus remoteConfigStatus; + public final State.SequenceNum sequenceNum; + public final State.AgentDescription agentDescription; + public final State.Capabilities capabilities; + public final State.InstanceUid instanceUid; + public final State.Flags flags; + public final State.EffectiveConfig effectiveConfig; + private final List> items; + + public OpampClientState( + State.RemoteConfigStatus remoteConfigStatus, + State.SequenceNum sequenceNum, + State.AgentDescription agentDescription, + State.Capabilities capabilities, + State.InstanceUid instanceUid, + State.Flags flags, + State.EffectiveConfig effectiveConfig) { + this.remoteConfigStatus = remoteConfigStatus; + this.sequenceNum = sequenceNum; + this.agentDescription = agentDescription; + this.capabilities = capabilities; + this.instanceUid = instanceUid; + this.flags = flags; + this.effectiveConfig = effectiveConfig; + + List> providedItems = new ArrayList<>(); + providedItems.add(remoteConfigStatus); + providedItems.add(sequenceNum); + providedItems.add(agentDescription); + providedItems.add(capabilities); + providedItems.add(instanceUid); + providedItems.add(flags); + providedItems.add(effectiveConfig); + + items = Collections.unmodifiableList(providedItems); + } + + public List> getAll() { + return items; + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/AgentToServerAppenders.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/AgentToServerAppenders.java new file mode 100644 index 
000000000..bbde2def9 --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/AgentToServerAppenders.java @@ -0,0 +1,73 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.impl.recipe; + +import io.opentelemetry.opamp.client.internal.impl.recipe.appenders.AgentDescriptionAppender; +import io.opentelemetry.opamp.client.internal.impl.recipe.appenders.AgentDisconnectAppender; +import io.opentelemetry.opamp.client.internal.impl.recipe.appenders.AgentToServerAppender; +import io.opentelemetry.opamp.client.internal.impl.recipe.appenders.CapabilitiesAppender; +import io.opentelemetry.opamp.client.internal.impl.recipe.appenders.EffectiveConfigAppender; +import io.opentelemetry.opamp.client.internal.impl.recipe.appenders.FlagsAppender; +import io.opentelemetry.opamp.client.internal.impl.recipe.appenders.InstanceUidAppender; +import io.opentelemetry.opamp.client.internal.impl.recipe.appenders.RemoteConfigStatusAppender; +import io.opentelemetry.opamp.client.internal.impl.recipe.appenders.SequenceNumberAppender; +import io.opentelemetry.opamp.client.internal.request.Field; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +/** + * This class is internal and is hence not for public use. Its APIs are unstable and can change at + * any time. 
+ */ +public final class AgentToServerAppenders { + public final AgentDescriptionAppender agentDescriptionAppender; + public final EffectiveConfigAppender effectiveConfigAppender; + public final RemoteConfigStatusAppender remoteConfigStatusAppender; + public final SequenceNumberAppender sequenceNumberAppender; + public final CapabilitiesAppender capabilitiesAppender; + public final InstanceUidAppender instanceUidAppender; + public final FlagsAppender flagsAppender; + public final AgentDisconnectAppender agentDisconnectAppender; + private final Map allAppenders; + + public AgentToServerAppenders( + AgentDescriptionAppender agentDescriptionAppender, + EffectiveConfigAppender effectiveConfigAppender, + RemoteConfigStatusAppender remoteConfigStatusAppender, + SequenceNumberAppender sequenceNumberAppender, + CapabilitiesAppender capabilitiesAppender, + InstanceUidAppender instanceUidAppender, + FlagsAppender flagsAppender, + AgentDisconnectAppender agentDisconnectAppender) { + this.agentDescriptionAppender = agentDescriptionAppender; + this.effectiveConfigAppender = effectiveConfigAppender; + this.remoteConfigStatusAppender = remoteConfigStatusAppender; + this.sequenceNumberAppender = sequenceNumberAppender; + this.capabilitiesAppender = capabilitiesAppender; + this.instanceUidAppender = instanceUidAppender; + this.flagsAppender = flagsAppender; + this.agentDisconnectAppender = agentDisconnectAppender; + + Map appenders = new HashMap<>(); + appenders.put(Field.AGENT_DESCRIPTION, agentDescriptionAppender); + appenders.put(Field.EFFECTIVE_CONFIG, effectiveConfigAppender); + appenders.put(Field.REMOTE_CONFIG_STATUS, remoteConfigStatusAppender); + appenders.put(Field.SEQUENCE_NUM, sequenceNumberAppender); + appenders.put(Field.CAPABILITIES, capabilitiesAppender); + appenders.put(Field.INSTANCE_UID, instanceUidAppender); + appenders.put(Field.FLAGS, flagsAppender); + appenders.put(Field.AGENT_DISCONNECT, agentDisconnectAppender); + allAppenders = 
Collections.unmodifiableMap(appenders); + } + + public AgentToServerAppender getForField(Field type) { + if (!allAppenders.containsKey(type)) { + throw new IllegalArgumentException("Field type " + type + " is not supported"); + } + return allAppenders.get(type); + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/RecipeManager.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/RecipeManager.java new file mode 100644 index 000000000..86a72f72b --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/RecipeManager.java @@ -0,0 +1,86 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.impl.recipe; + +import com.google.errorprone.annotations.CanIgnoreReturnValue; +import io.opentelemetry.opamp.client.internal.request.Field; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; + +/** + * This class is internal and is hence not for public use. Its APIs are unstable and can change at + * any time. 
+ */ +public final class RecipeManager { + private final Object recipeLock = new Object(); + private final List constantFields; + @Nullable private RequestRecipe previousRecipe = null; + @Nullable private RecipeBuilder builder; + + public static RecipeManager create(List constantFields) { + return new RecipeManager(Collections.unmodifiableList(constantFields)); + } + + private RecipeManager(List constantFields) { + this.constantFields = constantFields; + } + + @Nullable + public RequestRecipe previous() { + synchronized (recipeLock) { + return previousRecipe; + } + } + + @Nonnull + public RecipeBuilder next() { + synchronized (recipeLock) { + if (builder == null) { + builder = new RecipeBuilder(constantFields); + } + return builder; + } + } + + public final class RecipeBuilder { + private final Set fields = new HashSet<>(); + + @CanIgnoreReturnValue + public RecipeBuilder addField(Field field) { + fields.add(field); + return this; + } + + @CanIgnoreReturnValue + public RecipeBuilder addAllFields(Collection fields) { + this.fields.addAll(fields); + return this; + } + + @CanIgnoreReturnValue + public RecipeBuilder merge(RequestRecipe recipe) { + return addAllFields(recipe.getFields()); + } + + public RequestRecipe build() { + synchronized (recipeLock) { + RequestRecipe recipe = new RequestRecipe(Collections.unmodifiableCollection(fields)); + previousRecipe = recipe; + builder = null; + return recipe; + } + } + + private RecipeBuilder(List initialFields) { + fields.addAll(initialFields); + } + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/RequestRecipe.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/RequestRecipe.java new file mode 100644 index 000000000..4887cb1f2 --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/RequestRecipe.java @@ -0,0 +1,25 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + 
*/ + +package io.opentelemetry.opamp.client.internal.impl.recipe; + +import io.opentelemetry.opamp.client.internal.request.Field; +import java.util.Collection; + +/** + * This class is internal and is hence not for public use. Its APIs are unstable and can change at + * any time. + */ +public final class RequestRecipe { + private final Collection fields; + + public RequestRecipe(Collection fields) { + this.fields = fields; + } + + public Collection getFields() { + return fields; + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/appenders/AgentDescriptionAppender.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/appenders/AgentDescriptionAppender.java new file mode 100644 index 000000000..260ebef02 --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/appenders/AgentDescriptionAppender.java @@ -0,0 +1,31 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.impl.recipe.appenders; + +import java.util.function.Supplier; +import opamp.proto.AgentDescription; +import opamp.proto.AgentToServer; + +/** + * This class is internal and is hence not for public use. Its APIs are unstable and can change at + * any time. 
+ */ +public final class AgentDescriptionAppender implements AgentToServerAppender { + private final Supplier data; + + public static AgentDescriptionAppender create(Supplier data) { + return new AgentDescriptionAppender(data); + } + + private AgentDescriptionAppender(Supplier data) { + this.data = data; + } + + @Override + public void appendTo(AgentToServer.Builder builder) { + builder.agent_description(data.get()); + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/appenders/AgentDisconnectAppender.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/appenders/AgentDisconnectAppender.java new file mode 100644 index 000000000..7259f005c --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/appenders/AgentDisconnectAppender.java @@ -0,0 +1,27 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.impl.recipe.appenders; + +import opamp.proto.AgentDisconnect; +import opamp.proto.AgentToServer; + +/** + * This class is internal and is hence not for public use. Its APIs are unstable and can change at + * any time. 
+ */ +public final class AgentDisconnectAppender implements AgentToServerAppender { + + public static AgentDisconnectAppender create() { + return new AgentDisconnectAppender(); + } + + private AgentDisconnectAppender() {} + + @Override + public void appendTo(AgentToServer.Builder builder) { + builder.agent_disconnect(new AgentDisconnect.Builder().build()); + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/appenders/AgentToServerAppender.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/appenders/AgentToServerAppender.java new file mode 100644 index 000000000..b2ffc9312 --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/appenders/AgentToServerAppender.java @@ -0,0 +1,24 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.impl.recipe.appenders; + +import opamp.proto.AgentToServer; + +/** + * This class is internal and is hence not for public use. Its APIs are unstable and can change at + * any time. + * + *

AgentToServer request builder appender. Each implementation should match one of the + * AgentToServer fields and ensure the field is added to a request. + */ +public interface AgentToServerAppender { + /** + * Appends its data to the builder. + * + * @param builder The AgentToServer message builder. + */ + void appendTo(AgentToServer.Builder builder); +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/appenders/CapabilitiesAppender.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/appenders/CapabilitiesAppender.java new file mode 100644 index 000000000..ba55ba431 --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/appenders/CapabilitiesAppender.java @@ -0,0 +1,30 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.impl.recipe.appenders; + +import java.util.function.Supplier; +import opamp.proto.AgentToServer; + +/** + * This class is internal and is hence not for public use. Its APIs are unstable and can change at + * any time. 
+ */ +public final class CapabilitiesAppender implements AgentToServerAppender { + private final Supplier capabilities; + + public static CapabilitiesAppender create(Supplier capabilities) { + return new CapabilitiesAppender(capabilities); + } + + private CapabilitiesAppender(Supplier capabilities) { + this.capabilities = capabilities; + } + + @Override + public void appendTo(AgentToServer.Builder builder) { + builder.capabilities(capabilities.get()); + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/appenders/EffectiveConfigAppender.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/appenders/EffectiveConfigAppender.java new file mode 100644 index 000000000..2816a7ec1 --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/appenders/EffectiveConfigAppender.java @@ -0,0 +1,31 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.impl.recipe.appenders; + +import java.util.function.Supplier; +import opamp.proto.AgentToServer; +import opamp.proto.EffectiveConfig; + +/** + * This class is internal and is hence not for public use. Its APIs are unstable and can change at + * any time. 
+ */ +public final class EffectiveConfigAppender implements AgentToServerAppender { + private final Supplier effectiveConfig; + + public static EffectiveConfigAppender create(Supplier effectiveConfig) { + return new EffectiveConfigAppender(effectiveConfig); + } + + private EffectiveConfigAppender(Supplier effectiveConfig) { + this.effectiveConfig = effectiveConfig; + } + + @Override + public void appendTo(AgentToServer.Builder builder) { + builder.effective_config(effectiveConfig.get()); + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/appenders/FlagsAppender.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/appenders/FlagsAppender.java new file mode 100644 index 000000000..0fea9db36 --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/appenders/FlagsAppender.java @@ -0,0 +1,30 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.impl.recipe.appenders; + +import java.util.function.Supplier; +import opamp.proto.AgentToServer; + +/** + * This class is internal and is hence not for public use. Its APIs are unstable and can change at + * any time. 
+ */ +public final class FlagsAppender implements AgentToServerAppender { + private final Supplier flags; + + public static FlagsAppender create(Supplier flags) { + return new FlagsAppender(flags); + } + + private FlagsAppender(Supplier flags) { + this.flags = flags; + } + + @Override + public void appendTo(AgentToServer.Builder builder) { + builder.flags(flags.get()); + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/appenders/InstanceUidAppender.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/appenders/InstanceUidAppender.java new file mode 100644 index 000000000..2ee0213fe --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/appenders/InstanceUidAppender.java @@ -0,0 +1,31 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.impl.recipe.appenders; + +import java.util.function.Supplier; +import okio.ByteString; +import opamp.proto.AgentToServer; + +/** + * This class is internal and is hence not for public use. Its APIs are unstable and can change at + * any time. 
+ */ +public final class InstanceUidAppender implements AgentToServerAppender { + private final Supplier instanceUid; + + public static InstanceUidAppender create(Supplier instanceUid) { + return new InstanceUidAppender(instanceUid); + } + + private InstanceUidAppender(Supplier instanceUid) { + this.instanceUid = instanceUid; + } + + @Override + public void appendTo(AgentToServer.Builder builder) { + builder.instance_uid(ByteString.of(instanceUid.get())); + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/appenders/RemoteConfigStatusAppender.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/appenders/RemoteConfigStatusAppender.java new file mode 100644 index 000000000..63b411a65 --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/appenders/RemoteConfigStatusAppender.java @@ -0,0 +1,31 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.impl.recipe.appenders; + +import java.util.function.Supplier; +import opamp.proto.AgentToServer; +import opamp.proto.RemoteConfigStatus; + +/** + * This class is internal and is hence not for public use. Its APIs are unstable and can change at + * any time. 
+ */ +public final class RemoteConfigStatusAppender implements AgentToServerAppender { + private final Supplier remoteConfigStatus; + + public static RemoteConfigStatusAppender create(Supplier remoteConfigStatus) { + return new RemoteConfigStatusAppender(remoteConfigStatus); + } + + private RemoteConfigStatusAppender(Supplier remoteConfigStatus) { + this.remoteConfigStatus = remoteConfigStatus; + } + + @Override + public void appendTo(AgentToServer.Builder builder) { + builder.remote_config_status(remoteConfigStatus.get()); + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/appenders/SequenceNumberAppender.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/appenders/SequenceNumberAppender.java new file mode 100644 index 000000000..d73bcd47b --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/impl/recipe/appenders/SequenceNumberAppender.java @@ -0,0 +1,30 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.impl.recipe.appenders; + +import java.util.function.Supplier; +import opamp.proto.AgentToServer; + +/** + * This class is internal and is hence not for public use. Its APIs are unstable and can change at + * any time. 
+ */ +public final class SequenceNumberAppender implements AgentToServerAppender { + private final Supplier sequenceNumber; + + public static SequenceNumberAppender create(Supplier sequenceNumber) { + return new SequenceNumberAppender(sequenceNumber); + } + + private SequenceNumberAppender(Supplier sequenceNumber) { + this.sequenceNumber = sequenceNumber; + } + + @Override + public void appendTo(AgentToServer.Builder builder) { + builder.sequence_num(sequenceNumber.get()); + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/package-info.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/package-info.java new file mode 100644 index 000000000..b972b795a --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/package-info.java @@ -0,0 +1,9 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +@ParametersAreNonnullByDefault +package io.opentelemetry.opamp.client.internal; + +import javax.annotation.ParametersAreNonnullByDefault; diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/request/Field.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/request/Field.java new file mode 100644 index 000000000..277ecc2dd --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/request/Field.java @@ -0,0 +1,25 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.request; + +/** + * This class is internal and is hence not for public use. Its APIs are unstable and can change at + * any time. + * + *

/**
 * This class is internal and is hence not for public use. Its APIs are unstable and can change at
 * any time.
 *
 * <p>List of supported <a
 * href="https://github.com/open-telemetry/opamp-spec/blob/main/specification.md#agenttoserver-message">AgentToServer</a>
 * message fields.
 */
public enum Field {
  // NOTE: constant order is part of the enum's observable behavior (ordinal); do not reorder.
  INSTANCE_UID,
  SEQUENCE_NUM,
  AGENT_DESCRIPTION,
  CAPABILITIES,
  EFFECTIVE_CONFIG,
  REMOTE_CONFIG_STATUS,
  AGENT_DISCONNECT,
  FLAGS
}
+ */ +public interface AcceptsDelaySuggestion { + void suggestDelay(Duration delay); +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/request/delay/ExponentialBackoffPeriodicDelay.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/request/delay/ExponentialBackoffPeriodicDelay.java new file mode 100644 index 000000000..88b608c7e --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/request/delay/ExponentialBackoffPeriodicDelay.java @@ -0,0 +1,38 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.request.delay; + +import java.time.Duration; +import javax.annotation.concurrent.GuardedBy; + +public final class ExponentialBackoffPeriodicDelay implements PeriodicDelay { + private final Duration initialDelay; + private final Object delayNanosLock = new Object(); + + @GuardedBy("delayNanosLock") + private long delayNanos; + + public ExponentialBackoffPeriodicDelay(Duration initialDelay) { + this.initialDelay = initialDelay; + delayNanos = initialDelay.toNanos(); + } + + @Override + public Duration getNextDelay() { + synchronized (delayNanosLock) { + long previousValue = delayNanos; + delayNanos = delayNanos * 2; + return Duration.ofNanos(previousValue); + } + } + + @Override + public void reset() { + synchronized (delayNanosLock) { + delayNanos = initialDelay.toNanos(); + } + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/request/delay/FixedPeriodicDelay.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/request/delay/FixedPeriodicDelay.java new file mode 100644 index 000000000..2b54180e5 --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/request/delay/FixedPeriodicDelay.java @@ -0,0 +1,24 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package 
io.opentelemetry.opamp.client.internal.request.delay; + +import java.time.Duration; + +final class FixedPeriodicDelay implements PeriodicDelay { + private final Duration duration; + + public FixedPeriodicDelay(Duration duration) { + this.duration = duration; + } + + @Override + public Duration getNextDelay() { + return duration; + } + + @Override + public void reset() {} +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/request/delay/PeriodicDelay.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/request/delay/PeriodicDelay.java new file mode 100644 index 000000000..67aa93491 --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/request/delay/PeriodicDelay.java @@ -0,0 +1,18 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.request.delay; + +import java.time.Duration; + +public interface PeriodicDelay { + static PeriodicDelay ofFixedDuration(Duration duration) { + return new FixedPeriodicDelay(duration); + } + + Duration getNextDelay(); + + void reset(); +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/request/delay/RetryPeriodicDelay.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/request/delay/RetryPeriodicDelay.java new file mode 100644 index 000000000..46f1a21da --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/request/delay/RetryPeriodicDelay.java @@ -0,0 +1,41 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.request.delay; + +import java.time.Duration; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicReference; + +/** Defaults to an exponential backoff strategy, unless a delay is suggested. 
*/ +public final class RetryPeriodicDelay implements PeriodicDelay, AcceptsDelaySuggestion { + private final ExponentialBackoffPeriodicDelay exponentialBackoff; + private final AtomicReference currentDelay; + + public static RetryPeriodicDelay create(Duration initialDelay) { + return new RetryPeriodicDelay(new ExponentialBackoffPeriodicDelay(initialDelay)); + } + + private RetryPeriodicDelay(ExponentialBackoffPeriodicDelay exponentialBackoff) { + this.exponentialBackoff = exponentialBackoff; + currentDelay = new AtomicReference<>(exponentialBackoff); + } + + @Override + public void suggestDelay(Duration delay) { + currentDelay.set(PeriodicDelay.ofFixedDuration(delay)); + } + + @Override + public Duration getNextDelay() { + return Objects.requireNonNull(currentDelay.get()).getNextDelay(); + } + + @Override + public void reset() { + exponentialBackoff.reset(); + currentDelay.set(exponentialBackoff); + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/request/service/DaemonThreadFactory.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/request/service/DaemonThreadFactory.java new file mode 100644 index 000000000..77f84f72a --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/request/service/DaemonThreadFactory.java @@ -0,0 +1,25 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.request.service; + +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadFactory; +import javax.annotation.Nonnull; + +final class DaemonThreadFactory implements ThreadFactory { + private final ThreadFactory delegate = Executors.defaultThreadFactory(); + + @Override + public Thread newThread(@Nonnull Runnable r) { + Thread t = delegate.newThread(r); + try { + t.setDaemon(true); + } catch (SecurityException e) { + // Well, we tried. 
+ } + return t; + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/request/service/HttpRequestService.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/request/service/HttpRequestService.java new file mode 100644 index 000000000..8badaa98f --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/request/service/HttpRequestService.java @@ -0,0 +1,264 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.request.service; + +import io.opentelemetry.opamp.client.internal.connectivity.http.HttpErrorException; +import io.opentelemetry.opamp.client.internal.connectivity.http.HttpSender; +import io.opentelemetry.opamp.client.internal.connectivity.http.RetryAfterParser; +import io.opentelemetry.opamp.client.internal.request.Request; +import io.opentelemetry.opamp.client.internal.request.delay.AcceptsDelaySuggestion; +import io.opentelemetry.opamp.client.internal.request.delay.PeriodicDelay; +import io.opentelemetry.opamp.client.internal.request.delay.RetryPeriodicDelay; +import io.opentelemetry.opamp.client.internal.response.OpampServerResponseException; +import io.opentelemetry.opamp.client.internal.response.Response; +import io.opentelemetry.opamp.client.request.service.RequestService; +import java.io.IOException; +import java.time.Duration; +import java.util.Objects; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; +import javax.annotation.Nullable; +import 
opamp.proto.AgentToServer; +import opamp.proto.ServerErrorResponse; +import opamp.proto.ServerErrorResponseType; +import opamp.proto.ServerToAgent; + +public final class HttpRequestService implements RequestService { + private final HttpSender requestSender; + // must be a single threaded executor, the code in this class relies on requests being processed + // serially + private final ScheduledExecutorService executorService; + private final AtomicBoolean isRunning = new AtomicBoolean(false); + private final AtomicBoolean hasStopped = new AtomicBoolean(false); + private final ConnectionStatus connectionStatus; + private final AtomicReference> scheduledTask = new AtomicReference<>(); + private final RetryAfterParser retryAfterParser; + @Nullable private Callback callback; + @Nullable private Supplier requestSupplier; + public static final PeriodicDelay DEFAULT_DELAY_BETWEEN_REQUESTS = + PeriodicDelay.ofFixedDuration(Duration.ofSeconds(30)); + public static final PeriodicDelay DEFAULT_DELAY_BETWEEN_RETRIES = + RetryPeriodicDelay.create(Duration.ofSeconds(30)); + + /** + * Creates an {@link HttpRequestService}. + * + * @param requestSender The HTTP sender implementation. + */ + public static HttpRequestService create(HttpSender requestSender) { + return create(requestSender, DEFAULT_DELAY_BETWEEN_REQUESTS, DEFAULT_DELAY_BETWEEN_RETRIES); + } + + /** + * Creates an {@link HttpRequestService}. + * + * @param requestSender The HTTP sender implementation. + * @param periodicRequestDelay The time to wait between requests in general. + * @param periodicRetryDelay The time to wait between retries. 
+ */ + public static HttpRequestService create( + HttpSender requestSender, + PeriodicDelay periodicRequestDelay, + PeriodicDelay periodicRetryDelay) { + return new HttpRequestService( + requestSender, + Executors.newSingleThreadScheduledExecutor(new DaemonThreadFactory()), + periodicRequestDelay, + periodicRetryDelay, + RetryAfterParser.getInstance()); + } + + HttpRequestService( + HttpSender requestSender, + ScheduledExecutorService executorService, + PeriodicDelay periodicRequestDelay, + PeriodicDelay periodicRetryDelay, + RetryAfterParser retryAfterParser) { + this.requestSender = requestSender; + this.executorService = executorService; + this.retryAfterParser = retryAfterParser; + this.connectionStatus = new ConnectionStatus(periodicRequestDelay, periodicRetryDelay); + } + + @Override + public void start(Callback callback, Supplier requestSupplier) { + if (hasStopped.get()) { + throw new IllegalStateException("HttpRequestService cannot start after it has been stopped."); + } + if (isRunning.compareAndSet(false, true)) { + this.callback = callback; + this.requestSupplier = requestSupplier; + scheduleNextExecution(); + } else { + throw new IllegalStateException("HttpRequestService is already running"); + } + } + + @Override + public void stop() { + if (isRunning.compareAndSet(true, false)) { + hasStopped.set(true); + executorService.shutdown(); + } + } + + @Override + public void sendRequest() { + if (!isRunning.get()) { + throw new IllegalStateException("HttpRequestService is not running"); + } + + executorService.execute( + () -> { + // cancel the already scheduled task, a new one is created after current request is + // processed + ScheduledFuture scheduledFuture = scheduledTask.get(); + if (scheduledFuture != null) { + scheduledFuture.cancel(false); + } + sendAndScheduleNext(); + }); + } + + private void sendAndScheduleNext() { + doSendRequest(); + scheduleNextExecution(); + } + + private void scheduleNextExecution() { + scheduledTask.set( + 
executorService.schedule( + this::sendAndScheduleNext, + connectionStatus.getNextDelay().toNanos(), + TimeUnit.NANOSECONDS)); + } + + private void doSendRequest() { + AgentToServer agentToServer = Objects.requireNonNull(requestSupplier).get().getAgentToServer(); + + byte[] data = agentToServer.encodeByteString().toByteArray(); + CompletableFuture future = + requestSender.send(outputStream -> outputStream.write(data), data.length); + try (HttpSender.Response response = future.get(30, TimeUnit.SECONDS)) { + getCallback().onConnectionSuccess(); + if (isSuccessful(response)) { + handleHttpSuccess( + Response.create(ServerToAgent.ADAPTER.decode(response.bodyInputStream()))); + } else { + handleHttpError(response); + } + } catch (IOException | InterruptedException | TimeoutException e) { + getCallback().onConnectionFailed(e); + connectionStatus.retryAfter(null); + } catch (ExecutionException e) { + if (e.getCause() != null) { + getCallback().onConnectionFailed(e.getCause()); + } else { + getCallback().onConnectionFailed(e); + } + connectionStatus.retryAfter(null); + } + } + + private void handleHttpError(HttpSender.Response response) { + int errorCode = response.statusCode(); + getCallback().onRequestFailed(new HttpErrorException(errorCode, response.statusMessage())); + + if (errorCode == 503 || errorCode == 429) { + String retryAfterHeader = response.getHeader("Retry-After"); + Duration retryAfter = null; + if (retryAfterHeader != null) { + Optional duration = retryAfterParser.tryParse(retryAfterHeader); + if (duration.isPresent()) { + retryAfter = duration.get(); + } + } + connectionStatus.retryAfter(retryAfter); + } + } + + private static boolean isSuccessful(HttpSender.Response response) { + return response.statusCode() >= 200 && response.statusCode() < 300; + } + + private void handleHttpSuccess(Response response) { + connectionStatus.success(); + ServerToAgent serverToAgent = response.getServerToAgent(); + + if (serverToAgent.error_response != null) { + 
handleErrorResponse(serverToAgent.error_response); + } else { + getCallback().onRequestSuccess(response); + } + } + + private void handleErrorResponse(ServerErrorResponse errorResponse) { + if (errorResponse.type.equals(ServerErrorResponseType.ServerErrorResponseType_Unavailable)) { + Duration retryAfter = null; + if (errorResponse.retry_info != null) { + retryAfter = Duration.ofNanos(errorResponse.retry_info.retry_after_nanoseconds); + } + connectionStatus.retryAfter(retryAfter); + } + getCallback() + .onRequestFailed( + new OpampServerResponseException(errorResponse, errorResponse.error_message)); + } + + private Callback getCallback() { + return Objects.requireNonNull(callback); + } + + // this class is only used from a single threaded ScheduledExecutorService, hence no + // synchronization is needed + private static class ConnectionStatus { + private final PeriodicDelay periodicRequestDelay; + private final PeriodicDelay periodicRetryDelay; + + private boolean retrying; + private PeriodicDelay currentDelay; + + ConnectionStatus(PeriodicDelay periodicRequestDelay, PeriodicDelay periodicRetryDelay) { + this.periodicRequestDelay = periodicRequestDelay; + this.periodicRetryDelay = periodicRetryDelay; + currentDelay = periodicRequestDelay; + } + + void success() { + // after successful request transition from retry to regular delay + if (retrying) { + retrying = false; + periodicRequestDelay.reset(); + currentDelay = periodicRequestDelay; + } + } + + void retryAfter(@Nullable Duration retryAfter) { + // after failed request transition from regular to retry delay + if (!retrying) { + retrying = true; + periodicRetryDelay.reset(); + currentDelay = periodicRetryDelay; + if (retryAfter != null && periodicRetryDelay instanceof AcceptsDelaySuggestion) { + ((AcceptsDelaySuggestion) periodicRetryDelay).suggestDelay(retryAfter); + } + } + } + + Duration getNextDelay() { + return currentDelay.getNextDelay(); + } + } +} diff --git 
a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/request/service/WebSocketRequestService.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/request/service/WebSocketRequestService.java new file mode 100644 index 000000000..280725884 --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/request/service/WebSocketRequestService.java @@ -0,0 +1,274 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.request.service; + +import com.squareup.wire.ProtoAdapter; +import io.opentelemetry.opamp.client.internal.connectivity.websocket.WebSocket; +import io.opentelemetry.opamp.client.internal.request.Request; +import io.opentelemetry.opamp.client.internal.request.delay.AcceptsDelaySuggestion; +import io.opentelemetry.opamp.client.internal.request.delay.PeriodicDelay; +import io.opentelemetry.opamp.client.internal.request.delay.RetryPeriodicDelay; +import io.opentelemetry.opamp.client.internal.response.OpampServerResponseException; +import io.opentelemetry.opamp.client.internal.response.Response; +import io.opentelemetry.opamp.client.request.service.RequestService; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.time.Duration; +import java.util.Objects; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Supplier; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import javax.annotation.concurrent.GuardedBy; +import opamp.proto.ServerErrorResponse; +import opamp.proto.ServerErrorResponseType; +import opamp.proto.ServerToAgent; + +public final class WebSocketRequestService implements RequestService, WebSocket.Listener { + private static final PeriodicDelay DEFAULT_DELAY_BETWEEN_RETRIES = + 
RetryPeriodicDelay.create(Duration.ofSeconds(30)); + + private final WebSocket webSocket; + private final AtomicBoolean isRunning = new AtomicBoolean(false); + private final AtomicBoolean hasStopped = new AtomicBoolean(false); + private final ConnectionStatus connectionStatus; + private final ScheduledExecutorService executorService; + + /** Defined here. */ + private static final int WEBSOCKET_NORMAL_CLOSURE_CODE = 1000; + + @GuardedBy("hasPendingRequestLock") + private boolean hasPendingRequest = false; + + private final Object hasPendingRequestLock = new Object(); + @Nullable private Callback callback; + @Nullable private Supplier requestSupplier; + + /** + * Creates an {@link WebSocketRequestService}. + * + * @param webSocket The WebSocket implementation. + */ + public static WebSocketRequestService create(WebSocket webSocket) { + return create(webSocket, DEFAULT_DELAY_BETWEEN_RETRIES); + } + + /** + * Creates an {@link WebSocketRequestService}. + * + * @param webSocket The WebSocket implementation. + * @param periodicRetryDelay The time to wait between retries. 
+ */ + public static WebSocketRequestService create( + WebSocket webSocket, PeriodicDelay periodicRetryDelay) { + return new WebSocketRequestService( + webSocket, + periodicRetryDelay, + Executors.newSingleThreadScheduledExecutor(new DaemonThreadFactory())); + } + + WebSocketRequestService( + WebSocket webSocket, + PeriodicDelay periodicRetryDelay, + ScheduledExecutorService executorService) { + this.webSocket = webSocket; + this.executorService = executorService; + this.connectionStatus = new ConnectionStatus(periodicRetryDelay); + } + + @Override + public void start(Callback callback, Supplier requestSupplier) { + if (hasStopped.get()) { + throw new IllegalStateException("This service is already stopped"); + } + if (isRunning.compareAndSet(false, true)) { + this.callback = callback; + this.requestSupplier = requestSupplier; + startConnection(); + } else { + throw new IllegalStateException("The service has already started"); + } + } + + private void startConnection() { + webSocket.open(this); + } + + @Override + public void sendRequest() { + if (!isRunning.get()) { + throw new IllegalStateException("The service is not running"); + } + if (hasStopped.get()) { + throw new IllegalStateException("This service is already stopped"); + } + + doSendRequest(); + } + + private void doSendRequest() { + try { + synchronized (hasPendingRequestLock) { + if (!trySendRequest()) { + hasPendingRequest = true; + } + } + } catch (IOException e) { + getCallback().onRequestFailed(e); + } + } + + private boolean trySendRequest() throws IOException { + try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) { + ProtoAdapter.UINT64.encode(outputStream, 0L); + byte[] payload = getRequest().getAgentToServer().encode(); + outputStream.write(payload); + return webSocket.send(outputStream.toByteArray()); + } + } + + @Nonnull + private Request getRequest() { + return Objects.requireNonNull(requestSupplier).get(); + } + + @Override + public void stop() { + if 
(hasStopped.compareAndSet(false, true)) { + /* + Sending last message as explained in the spec: + https://opentelemetry.io/docs/specs/opamp/#websocket-transport-opamp-client-initiated. + The client implementation must ensure that the "agent_disconnect" field will be provided in the + next supplied request body. + */ + doSendRequest(); + webSocket.close(WEBSOCKET_NORMAL_CLOSURE_CODE, null); + executorService.shutdown(); + } + } + + @Override + public void onOpen() { + connectionStatus.success(); + getCallback().onConnectionSuccess(); + synchronized (hasPendingRequestLock) { + if (hasPendingRequest) { + hasPendingRequest = false; + sendRequest(); + } + } + } + + @Override + public void onMessage(byte[] data) { + try { + ServerToAgent serverToAgent = readServerToAgent(data); + + ServerErrorResponse errorResponse = serverToAgent.error_response; + if (errorResponse != null) { + handleServerError(errorResponse); + getCallback() + .onRequestFailed( + new OpampServerResponseException(errorResponse, errorResponse.error_message)); + return; + } + + getCallback().onRequestSuccess(Response.create(serverToAgent)); + } catch (IOException e) { + getCallback().onRequestFailed(e); + } + } + + private static ServerToAgent readServerToAgent(byte[] data) throws IOException { + int headerSize = ProtoAdapter.UINT64.encodedSize(ProtoAdapter.UINT64.decode(data)); + int payloadSize = data.length - headerSize; + byte[] payload = new byte[payloadSize]; + System.arraycopy(data, headerSize, payload, 0, payloadSize); + return ServerToAgent.ADAPTER.decode(payload); + } + + private void handleServerError(ServerErrorResponse errorResponse) { + if (serverIsUnavailable(errorResponse)) { + Duration retryAfter = null; + + if (errorResponse.retry_info != null) { + retryAfter = Duration.ofNanos(errorResponse.retry_info.retry_after_nanoseconds); + } + + webSocket.close(WEBSOCKET_NORMAL_CLOSURE_CODE, null); + connectionStatus.retryAfter(retryAfter); + } + } + + private static boolean 
serverIsUnavailable(ServerErrorResponse errorResponse) { + return errorResponse.type.equals(ServerErrorResponseType.ServerErrorResponseType_Unavailable); + } + + @Override + public void onClosing() { + // Noop + } + + @Override + public void onClosed() { + // If this service isn't stopped, we should retry connecting. + connectionStatus.retryAfter(null); + } + + @Override + public void onFailure(Throwable t) { + getCallback().onConnectionFailed(t); + connectionStatus.retryAfter(null); + } + + @Nonnull + private Callback getCallback() { + return Objects.requireNonNull(callback); + } + + private class ConnectionStatus { + private final PeriodicDelay periodicRetryDelay; + private final AtomicBoolean retryingConnection = new AtomicBoolean(false); + private final AtomicBoolean nextRetryScheduled = new AtomicBoolean(false); + + ConnectionStatus(PeriodicDelay periodicRetryDelay) { + this.periodicRetryDelay = periodicRetryDelay; + } + + void success() { + retryingConnection.set(false); + } + + @SuppressWarnings("FutureReturnValueIgnored") + void retryAfter(@Nullable Duration retryAfter) { + if (hasStopped.get()) { + return; + } + + if (retryingConnection.compareAndSet(false, true)) { + periodicRetryDelay.reset(); + if (retryAfter != null && periodicRetryDelay instanceof AcceptsDelaySuggestion) { + ((AcceptsDelaySuggestion) periodicRetryDelay).suggestDelay(retryAfter); + } + } + + if (nextRetryScheduled.compareAndSet(false, true)) { + executorService.schedule( + this::retryConnection, + periodicRetryDelay.getNextDelay().toNanos(), + TimeUnit.NANOSECONDS); + } + } + + private void retryConnection() { + nextRetryScheduled.set(false); + startConnection(); + } + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/response/MessageData.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/response/MessageData.java new file mode 100644 index 000000000..af47027ef --- /dev/null +++ 
b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/response/MessageData.java @@ -0,0 +1,32 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.response; + +import com.google.auto.value.AutoValue; +import io.opentelemetry.opamp.client.OpampClient; +import javax.annotation.Nullable; +import opamp.proto.AgentRemoteConfig; + +/** + * Data class provided in {@link OpampClient.Callbacks#onMessage(OpampClient, MessageData)} with + * Server's provided status changes. + */ +@AutoValue +public abstract class MessageData { + @Nullable + public abstract AgentRemoteConfig getRemoteConfig(); + + public static Builder builder() { + return new AutoValue_MessageData.Builder(); + } + + @AutoValue.Builder + public abstract static class Builder { + public abstract Builder setRemoteConfig(AgentRemoteConfig remoteConfig); + + public abstract MessageData build(); + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/response/OpampServerResponseException.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/response/OpampServerResponseException.java new file mode 100644 index 000000000..5eb3fba57 --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/response/OpampServerResponseException.java @@ -0,0 +1,29 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.response; + +import opamp.proto.ServerErrorResponse; + +/** + * This class is internal and is hence not for public use. Its APIs are unstable and can change at + * any time. + */ +public class OpampServerResponseException extends Exception { + private static final long serialVersionUID = 1L; + + public final ServerErrorResponse errorResponse; + + /** + * Constructs an OpAMP error related exception. + * + * @param errorResponse The OpAMP error. 
+ * @param message The OpAMP error message. + */ + public OpampServerResponseException(ServerErrorResponse errorResponse, String message) { + super(message); + this.errorResponse = errorResponse; + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/response/Response.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/response/Response.java new file mode 100644 index 000000000..c9b6bc19e --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/response/Response.java @@ -0,0 +1,18 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.response; + +import com.google.auto.value.AutoValue; +import opamp.proto.ServerToAgent; + +@AutoValue +public abstract class Response { + public abstract ServerToAgent getServerToAgent(); + + public static Response create(ServerToAgent serverToAgent) { + return new AutoValue_Response(serverToAgent); + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/state/InMemoryState.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/state/InMemoryState.java new file mode 100644 index 000000000..be2d34215 --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/state/InMemoryState.java @@ -0,0 +1,39 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.state; + +import static java.util.Objects.requireNonNull; + +import java.util.concurrent.atomic.AtomicReference; +import javax.annotation.Nonnull; + +/** + * This class is internal and is hence not for public use. Its APIs are unstable and can change at + * any time. 
+ */ +abstract class InMemoryState implements State { + private final AtomicReference state = new AtomicReference<>(); + + public InMemoryState(T initialValue) { + if (initialValue == null) { + throw new IllegalArgumentException("The value must not be null"); + } + state.set(initialValue); + } + + public void set(T value) { + if (value == null) { + throw new IllegalArgumentException("The value must not be null"); + } + state.set(value); + } + + @Nonnull + @Override + public T get() { + return requireNonNull(state.get()); + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/state/ObservableState.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/state/ObservableState.java new file mode 100644 index 000000000..bfd4d46bc --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/state/ObservableState.java @@ -0,0 +1,49 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.state; + +import io.opentelemetry.opamp.client.internal.request.Field; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +/** + * This class is internal and is hence not for public use. Its APIs are unstable and can change at + * any time. + * + *

Implementations of state that cannot be stored in memory. Users would need to implement its + * {@link #get()} method and ensure to call {@link #notifyUpdate()} whenever new data is available + * for the next client request. + */ +public abstract class ObservableState implements State { + private final Set listeners = Collections.synchronizedSet(new HashSet<>()); + + public final void addListener(Listener listener) { + listeners.add(listener); + } + + public final void removeListener(Listener listener) { + listeners.remove(listener); + } + + public final void notifyUpdate() { + synchronized (listeners) { + for (Listener listener : listeners) { + listener.onStateUpdate(getFieldType()); + } + } + } + + public interface Listener { + /** + * Notifies that there's new data available for this state, so that the client includes it in + * the next request. + * + * @param type The field type associated to this state. + */ + void onStateUpdate(Field type); + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/state/State.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/state/State.java new file mode 100644 index 000000000..abb0a6a9d --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/state/State.java @@ -0,0 +1,108 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.state; + +import io.opentelemetry.opamp.client.internal.request.Field; +import java.util.Objects; +import java.util.function.Supplier; +import javax.annotation.Nonnull; + +/** + * This class is internal and is hence not for public use. Its APIs are unstable and can change at + * any time. + * + *

Abstraction for a client request field that carries data. Each implementation can only be + * linked to one type of client request field, which is provided in its {@link #getFieldType()} + * method. + */ +public interface State extends Supplier { + + Field getFieldType(); + + @Nonnull + default T mustGet() { + return Objects.requireNonNull(get()); + } + + final class InstanceUid extends InMemoryState { + public InstanceUid(byte[] initialValue) { + super(initialValue); + } + + @Override + public Field getFieldType() { + return Field.INSTANCE_UID; + } + } + + final class SequenceNum extends InMemoryState { + public SequenceNum(Long initialValue) { + super(initialValue); + } + + public void increment() { + set(mustGet() + 1); + } + + @Override + public Field getFieldType() { + return Field.SEQUENCE_NUM; + } + } + + final class AgentDescription extends InMemoryState { + public AgentDescription(opamp.proto.AgentDescription initialValue) { + super(initialValue); + } + + @Override + public Field getFieldType() { + return Field.AGENT_DESCRIPTION; + } + } + + final class Capabilities extends InMemoryState { + public Capabilities(Long initialValue) { + super(initialValue); + } + + @Override + public Field getFieldType() { + return Field.CAPABILITIES; + } + } + + final class RemoteConfigStatus extends InMemoryState { + + public RemoteConfigStatus(opamp.proto.RemoteConfigStatus initialValue) { + super(initialValue); + } + + @Override + public Field getFieldType() { + return Field.REMOTE_CONFIG_STATUS; + } + } + + final class Flags extends InMemoryState { + + public Flags(Long initialValue) { + super(initialValue); + } + + @Override + public Field getFieldType() { + return Field.FLAGS; + } + } + + abstract class EffectiveConfig extends ObservableState { + @Override + public final Field getFieldType() { + return Field.EFFECTIVE_CONFIG; + } + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/tools/SystemTime.java 
b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/tools/SystemTime.java new file mode 100644 index 000000000..1d7f9e61e --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/internal/tools/SystemTime.java @@ -0,0 +1,21 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.tools; + +/** Utility to be able to mock the current system time for testing purposes. */ +public final class SystemTime { + private static final SystemTime INSTANCE = new SystemTime(); + + public static SystemTime getInstance() { + return INSTANCE; + } + + private SystemTime() {} + + public long getCurrentTimeMillis() { + return System.currentTimeMillis(); + } +} diff --git a/opamp-client/src/main/java/io/opentelemetry/opamp/client/request/service/RequestService.java b/opamp-client/src/main/java/io/opentelemetry/opamp/client/request/service/RequestService.java new file mode 100644 index 000000000..76d1650e3 --- /dev/null +++ b/opamp-client/src/main/java/io/opentelemetry/opamp/client/request/service/RequestService.java @@ -0,0 +1,78 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.request.service; + +import io.opentelemetry.opamp.client.OpampClient; +import io.opentelemetry.opamp.client.internal.request.Request; +import io.opentelemetry.opamp.client.internal.request.service.HttpRequestService; +import io.opentelemetry.opamp.client.internal.request.service.WebSocketRequestService; +import io.opentelemetry.opamp.client.internal.response.Response; +import java.util.function.Supplier; + +/** + * Handles the network connectivity in general; its implementation can choose what protocol to use + * (HTTP or WebSocket) and should provide the necessary configuration options depending on the + * case.
There are 2 implementations ready to use, {@link HttpRequestService}, for using HTTP, and + * {@link WebSocketRequestService} for using WebSocket. The {@link OpampClient} must not be aware of + * the specific implementation it uses as it can expect the same behavior from either. + */ +public interface RequestService { + + /** + * Starts the service. The actions done in this method depend on the implementation. For HTTP this + * is where the periodic poll task should get started, whereas for WebSocket this is where the + * connectivity is started. + * + * @param callback This is the only way that the service can communicate back to the {@link + * OpampClient} implementation. + * @param requestSupplier This supplier must be queried every time a new request is about to be + * sent. + */ + void start(Callback callback, Supplier requestSupplier); + + /** Triggers a new request send. */ + void sendRequest(); + + /** + * Clears the service for good. No further calls to {@link #sendRequest()} can be made after this + * method is called. + */ + void stop(); + + /** Allows the service to talk back to the {@link OpampClient} implementation. */ + interface Callback { + /** + * For WebSocket implementations, this is called when the connection is established. For HTTP + * implementations, this is called on every HTTP request that ends successfully. + */ + void onConnectionSuccess(); + + /** + * For WebSocket implementations, this is called when the connection cannot be made or is lost. + * For HTTP implementations, this is called on every HTTP request that cannot get a response. + * + * @param throwable The detailed error. + */ + void onConnectionFailed(Throwable throwable); + + /** + * For WebSocket implementations, this is called every time there's a new message from the + * server. For HTTP implementations, this is called when a successful HTTP request is finished + * with a valid server to agent response body. + * + * @param response The server to agent message. 
+ */ + void onRequestSuccess(Response response); + + /** + * For both HTTP and WebSocket implementations, this is called when an attempt at sending a + * message fails. + * + * @param throwable The detailed error. + */ + void onRequestFailed(Throwable throwable); + } +} diff --git a/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/connectivity/http/RetryAfterParserTest.java b/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/connectivity/http/RetryAfterParserTest.java new file mode 100644 index 000000000..ece1ff0a7 --- /dev/null +++ b/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/connectivity/http/RetryAfterParserTest.java @@ -0,0 +1,34 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.connectivity.http; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import io.opentelemetry.opamp.client.internal.tools.SystemTime; +import java.time.Duration; +import org.junit.jupiter.api.Test; + +class RetryAfterParserTest { + + @Test + void verifyParsing() { + SystemTime systemTime = mock(); + long currentTimeMillis = 1577836800000L; // Wed, 01 Jan 2020 00:00:00 GMT + when(systemTime.getCurrentTimeMillis()).thenReturn(currentTimeMillis); + + RetryAfterParser parser = new RetryAfterParser(systemTime); + + assertThat(parser.tryParse("123")).get().isEqualTo(Duration.ofSeconds(123)); + assertThat(parser.tryParse("Wed, 01 Jan 2020 01:00:00 GMT")) + .get() + .isEqualTo(Duration.ofHours(1)); + + // Check when provided time is older than the current one + assertThat(parser.tryParse("Tue, 31 Dec 2019 23:00:00 GMT")).isNotPresent(); + } +} diff --git a/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/connectivity/websocket/OkHttpWebSocketTest.java 
b/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/connectivity/websocket/OkHttpWebSocketTest.java new file mode 100644 index 000000000..ab84b3881 --- /dev/null +++ b/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/connectivity/websocket/OkHttpWebSocketTest.java @@ -0,0 +1,152 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.connectivity.websocket; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.clearInvocations; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +import okhttp3.OkHttpClient; +import okhttp3.Request; +import okhttp3.WebSocketListener; +import okio.ByteString; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Captor; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +class OkHttpWebSocketTest { + @Mock private OkHttpClient client; + @Mock private okhttp3.WebSocket okHttpWebSocket; + @Mock private WebSocket.Listener listener; + @Captor private ArgumentCaptor requestCaptor; + @Captor private ArgumentCaptor listenerCaptor; + private static final String URL = "ws://some.server"; + private OkHttpWebSocket webSocket; + + @BeforeEach + void setUp() { + webSocket = OkHttpWebSocket.create(URL, client); + when(client.newWebSocket(any(), any())).thenReturn(okHttpWebSocket); + } + + @Test + void validateOpen() { + // Assert websocket created + openAndCaptureArguments(); + assertThat(requestCaptor.getValue().url().host()).isEqualTo("some.server"); + + 
// Assert further calls to open won't do anything + webSocket.open(listener); + verifyNoMoreInteractions(client); + + // When connectivity succeeds, open calls won't do anything. + callOnOpen(); + webSocket.open(listener); + verifyNoMoreInteractions(client); + + // When connectivity fails, allow future open calls + clearInvocations(client); + callOnFailure(); + openAndCaptureArguments(); + assertThat(requestCaptor.getValue().url().host()).isEqualTo("some.server"); + } + + @Test + void validateSend() { + byte[] payload = new byte[1]; + + // Before opening + assertThat(webSocket.send(payload)).isFalse(); + + // After opening successfully + when(okHttpWebSocket.send(any(ByteString.class))).thenReturn(true); + openAndCaptureArguments(); + callOnOpen(); + assertThat(webSocket.send(payload)).isTrue(); + verify(okHttpWebSocket).send(ByteString.of(payload)); + + // After failing + callOnFailure(); + assertThat(webSocket.send(payload)).isFalse(); + verifyNoMoreInteractions(okHttpWebSocket); + } + + @Test + void validateClose() { + openAndCaptureArguments(); + + callOnOpen(); + webSocket.close(123, "something"); + verify(okHttpWebSocket).close(123, "something"); + + // Validate calling it again + webSocket.close(1, null); + verifyNoMoreInteractions(okHttpWebSocket); + + // Once closed, it should be possible to reopen it. 
+ clearInvocations(client); + callOnClosed(); + openAndCaptureArguments(); + assertThat(requestCaptor.getValue().url().host()).isEqualTo("some.server"); + } + + @Test + void validateOnClosing() { + openAndCaptureArguments(); + + callOnOpen(); + callOnClosing(); + + // Validate calling after onClosing + webSocket.close(1, null); + verifyNoInteractions(okHttpWebSocket); + } + + @Test + void validateOnMessage() { + byte[] payload = new byte[1]; + openAndCaptureArguments(); + + listenerCaptor.getValue().onMessage(mock(), ByteString.of(payload)); + verify(listener).onMessage(payload); + } + + private void callOnOpen() { + listenerCaptor.getValue().onOpen(mock(), mock()); + verify(listener).onOpen(); + } + + private void callOnClosed() { + listenerCaptor.getValue().onClosed(mock(), 0, ""); + verify(listener).onClosed(); + } + + private void callOnClosing() { + listenerCaptor.getValue().onClosing(mock(), 0, ""); + verify(listener).onClosing(); + } + + private void callOnFailure() { + Throwable t = mock(); + listenerCaptor.getValue().onFailure(mock(), t, mock()); + verify(listener).onFailure(t); + } + + private void openAndCaptureArguments() { + webSocket.open(listener); + verify(client).newWebSocket(requestCaptor.capture(), listenerCaptor.capture()); + } +} diff --git a/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/impl/OpampClientImplTest.java b/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/impl/OpampClientImplTest.java new file mode 100644 index 000000000..020b1b2e2 --- /dev/null +++ b/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/impl/OpampClientImplTest.java @@ -0,0 +1,474 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.impl; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.awaitility.Awaitility.await; +import static org.mockito.ArgumentMatchers.any; +import static 
org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; + +import io.opentelemetry.opamp.client.OpampClient; +import io.opentelemetry.opamp.client.internal.connectivity.http.OkHttpSender; +import io.opentelemetry.opamp.client.internal.request.Request; +import io.opentelemetry.opamp.client.internal.request.service.HttpRequestService; +import io.opentelemetry.opamp.client.internal.response.MessageData; +import io.opentelemetry.opamp.client.internal.state.State; +import io.opentelemetry.opamp.client.request.service.RequestService; +import java.io.IOException; +import java.time.Duration; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; +import javax.annotation.Nonnull; +import mockwebserver3.MockResponse; +import mockwebserver3.MockWebServer; +import mockwebserver3.RecordedRequest; +import mockwebserver3.junit5.StartStop; +import okio.Buffer; +import okio.ByteString; +import opamp.proto.AgentConfigFile; +import opamp.proto.AgentConfigMap; +import opamp.proto.AgentDescription; +import opamp.proto.AgentIdentification; +import opamp.proto.AgentRemoteConfig; +import opamp.proto.AgentToServer; +import opamp.proto.AgentToServerFlags; +import opamp.proto.AnyValue; +import opamp.proto.EffectiveConfig; +import opamp.proto.KeyValue; +import opamp.proto.RemoteConfigStatus; +import opamp.proto.RemoteConfigStatuses; +import opamp.proto.ServerErrorResponse; +import opamp.proto.ServerToAgent; +import opamp.proto.ServerToAgentFlags; +import org.jetbrains.annotations.Nullable; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import 
org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +class OpampClientImplTest { + private RequestService requestService; + private OpampClientState state; + private OpampClientImpl client; + private TestEffectiveConfig effectiveConfig; + private TestCallbacks callbacks; + @StartStop private final MockWebServer server = new MockWebServer(); + + @BeforeEach + void setUp() { + effectiveConfig = + new TestEffectiveConfig( + new EffectiveConfig.Builder() + .config_map(createAgentConfigMap("first", "first content")) + .build()); + state = + new OpampClientState( + new State.RemoteConfigStatus( + getRemoteConfigStatus(RemoteConfigStatuses.RemoteConfigStatuses_UNSET)), + new State.SequenceNum(1L), + new State.AgentDescription(new AgentDescription.Builder().build()), + new State.Capabilities(5L), + new State.InstanceUid(new byte[] {1, 2, 3}), + new State.Flags((long) AgentToServerFlags.AgentToServerFlags_Unspecified.getValue()), + effectiveConfig); + requestService = createHttpService(); + } + + @AfterEach + void tearDown() { + client.close(); + } + + @Test + void verifyFieldsSent() { + // Check first request + ServerToAgent response = new ServerToAgent.Builder().build(); + RecordedRequest firstRequest = initializeClient(response); + AgentToServer firstMessage = getAgentToServerMessage(firstRequest); + + // Required first request fields + assertThat(firstMessage.instance_uid).isNotNull(); + assertThat(firstMessage.sequence_num).isEqualTo(1); + assertThat(firstMessage.capabilities).isEqualTo(state.capabilities.get()); + assertThat(firstMessage.agent_description).isEqualTo(state.agentDescription.get()); + assertThat(firstMessage.effective_config).isEqualTo(state.effectiveConfig.get()); + assertThat(firstMessage.remote_config_status).isEqualTo(state.remoteConfigStatus.get()); + + // Check second request + enqueueServerToAgentResponse(response); + RemoteConfigStatus remoteConfigStatus = + new RemoteConfigStatus.Builder() + 
.status(RemoteConfigStatuses.RemoteConfigStatuses_APPLYING) + .build(); + client.setRemoteConfigStatus(remoteConfigStatus); + + RecordedRequest secondRequest = takeRequest(); + AgentToServer secondMessage = getAgentToServerMessage(secondRequest); + + // Verify only changed and required fields are present + assertThat(secondMessage.instance_uid).isNotNull(); + assertThat(secondMessage.sequence_num).isEqualTo(2); + assertThat(firstMessage.capabilities).isEqualTo(state.capabilities.get()); + assertThat(secondMessage.agent_description).isNull(); + assertThat(secondMessage.effective_config).isNull(); + assertThat(secondMessage.remote_config_status).isEqualTo(remoteConfigStatus); + + // Check state observing + enqueueServerToAgentResponse(response); + EffectiveConfig otherConfig = + new EffectiveConfig.Builder() + .config_map(createAgentConfigMap("other", "other value")) + .build(); + effectiveConfig.config = otherConfig; + effectiveConfig.notifyUpdate(); + + // Check third request + RecordedRequest thirdRequest = takeRequest(); + AgentToServer thirdMessage = getAgentToServerMessage(thirdRequest); + + assertThat(thirdMessage.instance_uid).isNotNull(); + assertThat(thirdMessage.sequence_num).isEqualTo(3); + assertThat(firstMessage.capabilities).isEqualTo(state.capabilities.get()); + assertThat(thirdMessage.agent_description).isNull(); + assertThat(thirdMessage.remote_config_status).isNull(); + assertThat(thirdMessage.effective_config) + .isEqualTo(otherConfig); // it was changed via observable state + + // Check when the server requests for all fields + + ServerToAgent reportFullState = + new ServerToAgent.Builder() + .flags(ServerToAgentFlags.ServerToAgentFlags_ReportFullState.getValue()) + .build(); + enqueueServerToAgentResponse(reportFullState); + requestService.sendRequest(); + takeRequest(); // Notifying the client to send all fields next time + + // Request with all fields + enqueueServerToAgentResponse(new ServerToAgent.Builder().build()); + 
requestService.sendRequest(); + + AgentToServer fullRequestedMessage = getAgentToServerMessage(takeRequest()); + + // Required first request fields + assertThat(fullRequestedMessage.instance_uid).isNotNull(); + assertThat(fullRequestedMessage.sequence_num).isEqualTo(5); + assertThat(fullRequestedMessage.capabilities).isEqualTo(state.capabilities.get()); + assertThat(fullRequestedMessage.agent_description).isEqualTo(state.agentDescription.get()); + assertThat(fullRequestedMessage.effective_config).isEqualTo(state.effectiveConfig.get()); + assertThat(fullRequestedMessage.remote_config_status).isEqualTo(state.remoteConfigStatus.get()); + } + + @Test + void verifyStop() { + initializeClient(); + + enqueueServerToAgentResponse(new ServerToAgent.Builder().build()); + client.close(); + + AgentToServer agentToServerMessage = getAgentToServerMessage(takeRequest()); + assertThat(agentToServerMessage.agent_disconnect).isNotNull(); + } + + @Test + void onSuccess_withChangesToReport_notifyCallbackOnMessage() { + initializeClient(); + AgentRemoteConfig remoteConfig = + new AgentRemoteConfig.Builder() + .config(createAgentConfigMap("someKey", "someValue")) + .build(); + ServerToAgent serverToAgent = new ServerToAgent.Builder().remote_config(remoteConfig).build(); + enqueueServerToAgentResponse(serverToAgent); + + // Force request + requestService.sendRequest(); + + // Await for onMessage call + await().atMost(Duration.ofSeconds(5)).until(() -> callbacks.onMessageCalls.get() == 1); + + verify(callbacks) + .onMessage(client, MessageData.builder().setRemoteConfig(remoteConfig).build()); + } + + @Test + void onSuccess_withNoChangesToReport_doNotNotifyCallbackOnMessage() { + initializeClient(); + ServerToAgent serverToAgent = new ServerToAgent.Builder().build(); + enqueueServerToAgentResponse(serverToAgent); + + // Force request + requestService.sendRequest(); + + // Giving some time for the callback to get called + await().during(Duration.ofSeconds(1)); + + verify(callbacks, 
never()).onMessage(eq(client), any()); + } + + @Test + void verifyAgentDescriptionSetter() { + initializeClient(); + AgentDescription agentDescription = + getAgentDescriptionWithOneIdentifyingValue("service.name", "My service"); + + // Update when changed + enqueueServerToAgentResponse(new ServerToAgent.Builder().build()); + client.setAgentDescription(agentDescription); + assertThat(takeRequest()).isNotNull(); + + // Ignore when the provided value is the same as the current one + enqueueServerToAgentResponse(new ServerToAgent.Builder().build()); + client.setAgentDescription(agentDescription); + assertThat(takeRequest()).isNull(); + } + + @Test + void verifyRemoteConfigStatusSetter() { + initializeClient(); + RemoteConfigStatus remoteConfigStatus = + getRemoteConfigStatus(RemoteConfigStatuses.RemoteConfigStatuses_APPLYING); + + // Update when changed + enqueueServerToAgentResponse(new ServerToAgent.Builder().build()); + client.setRemoteConfigStatus(remoteConfigStatus); + assertThat(takeRequest()).isNotNull(); + + // Ignore when the provided value is the same as the current one + enqueueServerToAgentResponse(new ServerToAgent.Builder().build()); + client.setRemoteConfigStatus(remoteConfigStatus); + assertThat(takeRequest()).isNull(); + } + + @Test + void onConnectionSuccessful_notifyCallback() { + initializeClient(); + + await().atMost(Duration.ofSeconds(5)).until(() -> callbacks.onConnectCalls.get() == 1); + + verify(callbacks).onConnect(client); + verify(callbacks, never()).onConnectFailed(eq(client), any()); + } + + @Test + void onFailedResponse_keepFieldsForNextRequest() { + initializeClient(); + + // Mock failed request + server.enqueue(new MockResponse.Builder().code(404).build()); + + // Adding a non-constant field + AgentDescription agentDescription = + getAgentDescriptionWithOneIdentifyingValue("service.namespace", "something"); + client.setAgentDescription(agentDescription); + + // Assert first request contains it + 
assertThat(getAgentToServerMessage(takeRequest()).agent_description) + .isEqualTo(agentDescription); + + // Since it failed, send the agent description field in the next request + enqueueServerToAgentResponse(new ServerToAgent.Builder().build()); + requestService.sendRequest(); + assertThat(getAgentToServerMessage(takeRequest()).agent_description) + .isEqualTo(agentDescription); + + // When there's no failure, do not keep it. + enqueueServerToAgentResponse(new ServerToAgent.Builder().build()); + requestService.sendRequest(); + assertThat(getAgentToServerMessage(takeRequest()).agent_description).isNull(); + } + + @Test + void onFailedResponse_withServerErrorData_notifyCallback() { + initializeClient(); + + ServerErrorResponse errorResponse = new ServerErrorResponse.Builder().build(); + enqueueServerToAgentResponse(new ServerToAgent.Builder().error_response(errorResponse).build()); + + // Force request + requestService.sendRequest(); + + await().atMost(Duration.ofSeconds(5)).until(() -> callbacks.onErrorResponseCalls.get() == 1); + + verify(callbacks).onErrorResponse(client, errorResponse); + verify(callbacks, never()).onMessage(eq(client), any()); + } + + @Test + void onConnectionFailed_notifyCallback() { + initializeClient(); + Throwable throwable = new Throwable(); + + client.onConnectionFailed(throwable); + + verify(callbacks).onConnectFailed(client, throwable); + } + + @Test + void whenServerProvidesNewInstanceUid_useIt() { + initializeClient(); + byte[] initialUid = state.instanceUid.get(); + + byte[] serverProvidedUid = new byte[] {1, 2, 3}; + ServerToAgent response = + new ServerToAgent.Builder() + .agent_identification( + new AgentIdentification.Builder() + .new_instance_uid(ByteString.of(serverProvidedUid)) + .build()) + .build(); + + enqueueServerToAgentResponse(response); + requestService.sendRequest(); + + await().atMost(Duration.ofSeconds(5)).until(() -> state.instanceUid.get() != initialUid); + + 
assertThat(state.instanceUid.get()).isEqualTo(serverProvidedUid); + } + + private static AgentToServer getAgentToServerMessage(RecordedRequest request) { + try { + return AgentToServer.ADAPTER.decode(Objects.requireNonNull(request.getBody())); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private RecordedRequest takeRequest() { + try { + return server.takeRequest(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + + private void enqueueServerToAgentResponse(ServerToAgent response) { + server.enqueue(getMockResponse(response)); + } + + @Nonnull + private static MockResponse getMockResponse(ServerToAgent response) { + Buffer bodyBuffer = new Buffer(); + bodyBuffer.write(response.encode()); + return new MockResponse.Builder().code(200).body(bodyBuffer).build(); + } + + private static RemoteConfigStatus getRemoteConfigStatus(RemoteConfigStatuses status) { + return new RemoteConfigStatus.Builder().status(status).build(); + } + + private static AgentConfigMap createAgentConfigMap(String key, String content) { + Map keyToFile = new HashMap<>(); + keyToFile.put(key, new AgentConfigFile.Builder().body(ByteString.encodeUtf8(content)).build()); + return new AgentConfigMap.Builder().config_map(keyToFile).build(); + } + + private static AgentDescription getAgentDescriptionWithOneIdentifyingValue( + String key, String value) { + KeyValue keyValue = + new KeyValue.Builder() + .key(key) + .value(new AnyValue.Builder().string_value(value).build()) + .build(); + List keyValues = new ArrayList<>(); + keyValues.add(keyValue); + return new AgentDescription.Builder().identifying_attributes(keyValues).build(); + } + + private RecordedRequest initializeClient() { + return initializeClient(new ServerToAgent.Builder().build()); + } + + private RecordedRequest initializeClient(ServerToAgent initialResponse) { + // Prepare first request on start + enqueueServerToAgentResponse(initialResponse); + + callbacks = spy(new 
TestCallbacks()); + client = OpampClientImpl.create(requestService, state, callbacks); + + return takeRequest(); + } + + private static class TestEffectiveConfig extends State.EffectiveConfig { + private opamp.proto.EffectiveConfig config; + + TestEffectiveConfig(opamp.proto.EffectiveConfig initialValue) { + config = initialValue; + } + + @Override + public opamp.proto.EffectiveConfig get() { + return config; + } + } + + private RequestService createHttpService() { + return new TestHttpRequestService( + HttpRequestService.create(OkHttpSender.create(server.url("/v1/opamp").toString()))); + } + + private static class TestHttpRequestService implements RequestService { + private final HttpRequestService delegate; + + private TestHttpRequestService(HttpRequestService delegate) { + this.delegate = delegate; + } + + @Override + public void start(Callback callback, Supplier requestSupplier) { + delegate.start(callback, requestSupplier); + } + + @Override + public void sendRequest() { + delegate.sendRequest(); + } + + @Override + public void stop() { + // This is to verify agent disconnect field presence for the websocket use case. 
+ delegate.sendRequest(); + delegate.stop(); + } + } + + private static class TestCallbacks implements OpampClient.Callbacks { + private final AtomicInteger onConnectCalls = new AtomicInteger(); + private final AtomicInteger onConnectFailedCalls = new AtomicInteger(); + private final AtomicInteger onErrorResponseCalls = new AtomicInteger(); + private final AtomicInteger onMessageCalls = new AtomicInteger(); + + @Override + public void onConnect(OpampClient client) { + onConnectCalls.incrementAndGet(); + } + + @Override + public void onConnectFailed(OpampClient client, @Nullable Throwable throwable) { + onConnectFailedCalls.incrementAndGet(); + } + + @Override + public void onErrorResponse(OpampClient client, ServerErrorResponse errorResponse) { + onErrorResponseCalls.incrementAndGet(); + } + + @Override + public void onMessage(OpampClient client, MessageData messageData) { + onMessageCalls.incrementAndGet(); + } + } +} diff --git a/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/impl/OpampClientStateTest.java b/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/impl/OpampClientStateTest.java new file mode 100644 index 000000000..000b4a6fe --- /dev/null +++ b/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/impl/OpampClientStateTest.java @@ -0,0 +1,43 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.impl; + +import static org.assertj.core.api.Assertions.assertThat; + +import io.opentelemetry.opamp.client.internal.state.State; +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.List; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +@SuppressWarnings("UnusedVariable") +class OpampClientStateTest { + @Mock private 
State.RemoteConfigStatus remoteConfigStatus; + @Mock private State.SequenceNum sequenceNum; + @Mock private State.AgentDescription agentDescription; + @Mock private State.Capabilities capabilities; + @Mock private State.InstanceUid instanceUid; + @Mock private State.Flags flags; + @Mock private State.EffectiveConfig effectiveConfig; + @InjectMocks private OpampClientState state; + + @Test + void verifyAllFields() throws IllegalAccessException { + List> stateFields = new ArrayList<>(); + for (Field field : OpampClientState.class.getFields()) { + if (State.class.isAssignableFrom(field.getType())) { + stateFields.add((State) field.get(state)); + } + } + + assertThat(state.getAll()).containsAll(stateFields); + } +} diff --git a/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/impl/recipe/AgentToServerAppendersTest.java b/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/impl/recipe/AgentToServerAppendersTest.java new file mode 100644 index 000000000..6bab5615f --- /dev/null +++ b/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/impl/recipe/AgentToServerAppendersTest.java @@ -0,0 +1,53 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.impl.recipe; + +import static org.assertj.core.api.Assertions.assertThat; + +import io.opentelemetry.opamp.client.internal.impl.recipe.appenders.AgentDescriptionAppender; +import io.opentelemetry.opamp.client.internal.impl.recipe.appenders.AgentDisconnectAppender; +import io.opentelemetry.opamp.client.internal.impl.recipe.appenders.AgentToServerAppender; +import io.opentelemetry.opamp.client.internal.impl.recipe.appenders.CapabilitiesAppender; +import io.opentelemetry.opamp.client.internal.impl.recipe.appenders.EffectiveConfigAppender; +import io.opentelemetry.opamp.client.internal.impl.recipe.appenders.FlagsAppender; +import 
io.opentelemetry.opamp.client.internal.impl.recipe.appenders.InstanceUidAppender; +import io.opentelemetry.opamp.client.internal.impl.recipe.appenders.RemoteConfigStatusAppender; +import io.opentelemetry.opamp.client.internal.impl.recipe.appenders.SequenceNumberAppender; +import io.opentelemetry.opamp.client.internal.request.Field; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +class AgentToServerAppendersTest { + @Mock private AgentDescriptionAppender agentDescriptionAppender; + @Mock private EffectiveConfigAppender effectiveConfigAppender; + @Mock private RemoteConfigStatusAppender remoteConfigStatusAppender; + @Mock private SequenceNumberAppender sequenceNumberAppender; + @Mock private CapabilitiesAppender capabilitiesAppender; + @Mock private FlagsAppender flagsAppender; + @Mock private InstanceUidAppender instanceUidAppender; + @Mock private AgentDisconnectAppender agentDisconnectAppender; + @InjectMocks private AgentToServerAppenders appenders; + + @Test + void verifyAppenderList() { + verifyMapping(Field.AGENT_DESCRIPTION, agentDescriptionAppender); + verifyMapping(Field.EFFECTIVE_CONFIG, effectiveConfigAppender); + verifyMapping(Field.REMOTE_CONFIG_STATUS, remoteConfigStatusAppender); + verifyMapping(Field.SEQUENCE_NUM, sequenceNumberAppender); + verifyMapping(Field.CAPABILITIES, capabilitiesAppender); + verifyMapping(Field.INSTANCE_UID, instanceUidAppender); + verifyMapping(Field.FLAGS, flagsAppender); + verifyMapping(Field.AGENT_DISCONNECT, agentDisconnectAppender); + } + + private void verifyMapping(Field type, AgentToServerAppender appender) { + assertThat(appenders.getForField(type)).isEqualTo(appender); + } +} diff --git a/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/impl/recipe/RecipeManagerTest.java 
b/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/impl/recipe/RecipeManagerTest.java new file mode 100644 index 000000000..2da1d13ec --- /dev/null +++ b/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/impl/recipe/RecipeManagerTest.java @@ -0,0 +1,66 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.impl.recipe; + +import static org.assertj.core.api.Assertions.assertThat; + +import io.opentelemetry.opamp.client.internal.request.Field; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import javax.annotation.Nonnull; +import org.junit.jupiter.api.Test; + +class RecipeManagerTest { + + @Test + void verifyConstantValues() { + RecipeManager recipeManager = + RecipeManager.create(getFieldsAsList(Field.AGENT_DESCRIPTION, Field.FLAGS)); + + // First run + assertThat(recipeManager.next().build().getFields()) + .containsExactlyInAnyOrder(Field.AGENT_DESCRIPTION, Field.FLAGS); + + // Adding extra fields + recipeManager.next().addField(Field.CAPABILITIES); + + assertThat(recipeManager.next().build().getFields()) + .containsExactlyInAnyOrder(Field.AGENT_DESCRIPTION, Field.FLAGS, Field.CAPABILITIES); + + // Not adding fields for the next build + assertThat(recipeManager.next().build().getFields()) + .containsExactlyInAnyOrder(Field.AGENT_DESCRIPTION, Field.FLAGS); + } + + @Test + void verifyPreviousFields() { + RecipeManager recipeManager = + RecipeManager.create(getFieldsAsList(Field.CAPABILITIES, Field.FLAGS)); + + // Previous build when there's none + assertThat(recipeManager.previous()).isNull(); + + // First build + Collection fields = + recipeManager.next().addField(Field.REMOTE_CONFIG_STATUS).build().getFields(); + assertThat(fields) + .containsExactlyInAnyOrder(Field.CAPABILITIES, Field.FLAGS, Field.REMOTE_CONFIG_STATUS); + 
assertThat(recipeManager.previous().getFields()).isEqualTo(fields); + + // Merging fields + recipeManager.next().addField(Field.AGENT_DISCONNECT).merge(recipeManager.previous()); + assertThat(recipeManager.next().build().getFields()) + .containsExactlyInAnyOrder( + Field.CAPABILITIES, Field.FLAGS, Field.REMOTE_CONFIG_STATUS, Field.AGENT_DISCONNECT); + } + + @Nonnull + private static List getFieldsAsList(Field... fields) { + return new ArrayList<>(Arrays.asList(fields)); + } +} diff --git a/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/request/delay/ExponentialBackoffPeriodicDelayTest.java b/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/request/delay/ExponentialBackoffPeriodicDelayTest.java new file mode 100644 index 000000000..ff7a1e186 --- /dev/null +++ b/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/request/delay/ExponentialBackoffPeriodicDelayTest.java @@ -0,0 +1,29 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.request.delay; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.time.Duration; +import org.junit.jupiter.api.Test; + +class ExponentialBackoffPeriodicDelayTest { + @Test + void verifyDelayUpdates() { + ExponentialBackoffPeriodicDelay delay = + new ExponentialBackoffPeriodicDelay(Duration.ofSeconds(1)); + + assertThat(delay.getNextDelay()).isEqualTo(Duration.ofSeconds(1)); + assertThat(delay.getNextDelay()).isEqualTo(Duration.ofSeconds(2)); + assertThat(delay.getNextDelay()).isEqualTo(Duration.ofSeconds(4)); + assertThat(delay.getNextDelay()).isEqualTo(Duration.ofSeconds(8)); + assertThat(delay.getNextDelay()).isEqualTo(Duration.ofSeconds(16)); + + // Reset + delay.reset(); + assertThat(delay.getNextDelay()).isEqualTo(Duration.ofSeconds(1)); + } +} diff --git a/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/request/delay/RetryPeriodicDelayTest.java 
b/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/request/delay/RetryPeriodicDelayTest.java new file mode 100644 index 000000000..88bda4ec7 --- /dev/null +++ b/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/request/delay/RetryPeriodicDelayTest.java @@ -0,0 +1,31 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.request.delay; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.time.Duration; +import org.junit.jupiter.api.Test; + +class RetryPeriodicDelayTest { + @Test + public void verifyDelayBehavior() { + RetryPeriodicDelay retryPeriodicDelay = RetryPeriodicDelay.create(Duration.ofSeconds(1)); + + // Without suggested delay + assertThat(retryPeriodicDelay.getNextDelay()).isEqualTo(Duration.ofSeconds(1)); + assertThat(retryPeriodicDelay.getNextDelay()).isEqualTo(Duration.ofSeconds(2)); + assertThat(retryPeriodicDelay.getNextDelay()).isEqualTo(Duration.ofSeconds(4)); + retryPeriodicDelay.reset(); + assertThat(retryPeriodicDelay.getNextDelay()).isEqualTo(Duration.ofSeconds(1)); + + // With suggested delay + retryPeriodicDelay.suggestDelay(Duration.ofSeconds(5)); + assertThat(retryPeriodicDelay.getNextDelay()).isEqualTo(Duration.ofSeconds(5)); + retryPeriodicDelay.reset(); + assertThat(retryPeriodicDelay.getNextDelay()).isEqualTo(Duration.ofSeconds(1)); + } +} diff --git a/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/request/service/HttpRequestServiceTest.java b/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/request/service/HttpRequestServiceTest.java new file mode 100644 index 000000000..9c5ea516d --- /dev/null +++ b/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/request/service/HttpRequestServiceTest.java @@ -0,0 +1,380 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package 
io.opentelemetry.opamp.client.internal.request.service; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.clearInvocations; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import io.opentelemetry.opamp.client.internal.connectivity.http.HttpErrorException; +import io.opentelemetry.opamp.client.internal.connectivity.http.HttpSender; +import io.opentelemetry.opamp.client.internal.connectivity.http.RetryAfterParser; +import io.opentelemetry.opamp.client.internal.request.Request; +import io.opentelemetry.opamp.client.internal.request.delay.PeriodicDelay; +import io.opentelemetry.opamp.client.internal.response.Response; +import io.opentelemetry.opamp.client.request.service.RequestService; +import java.io.ByteArrayInputStream; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.Queue; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import opamp.proto.AgentToServer; +import opamp.proto.RetryInfo; +import opamp.proto.ServerErrorResponse; +import opamp.proto.ServerErrorResponseType; +import opamp.proto.ServerToAgent; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@SuppressWarnings("unchecked") +@ExtendWith(MockitoExtension.class) +class 
HttpRequestServiceTest { + + private static final Duration REGULAR_DELAY = Duration.ofSeconds(1); + private static final Duration RETRY_DELAY = Duration.ofSeconds(5); + + @Mock private RequestService.Callback callback; + private TestScheduler scheduler; + private TestHttpSender requestSender; + private PeriodicDelay periodicRequestDelay; + private PeriodicDelayWithSuggestion periodicRetryDelay; + private int requestSize = -1; + private HttpRequestService httpRequestService; + + @BeforeEach + void setUp() { + requestSender = new TestHttpSender(); + periodicRequestDelay = createPeriodicDelay(REGULAR_DELAY); + periodicRetryDelay = createPeriodicDelayWithSuggestionSupport(RETRY_DELAY); + scheduler = new TestScheduler(); + httpRequestService = + new HttpRequestService( + requestSender, + scheduler.getMockService(), + periodicRequestDelay, + periodicRetryDelay, + RetryAfterParser.getInstance()); + httpRequestService.start(callback, this::createRequest); + } + + @AfterEach + void tearDown() { + httpRequestService.stop(); + verify(scheduler.getMockService()).shutdown(); + } + + @Test + void verifyStart_scheduledFirstTask() { + TestScheduler.Task firstTask = assertAndGetSingleCurrentTask(); + assertThat(firstTask.getDelay()).isEqualTo(REGULAR_DELAY); + + // Verify initial task creates next one + scheduler.clearTasks(); + requestSender.enqueueResponse(createSuccessfulResponse(new ServerToAgent.Builder().build())); + firstTask.run(); + + assertThat(scheduler.getScheduledTasks()).hasSize(1); + + // Check on-demand requests don't create subsequent tasks + requestSender.enqueueResponse(createSuccessfulResponse(new ServerToAgent.Builder().build())); + httpRequestService.sendRequest(); + + assertThat(scheduler.getScheduledTasks()).hasSize(1); + } + + @Test + void verifySendingRequest_happyPath() { + ServerToAgent serverToAgent = new ServerToAgent.Builder().build(); + HttpSender.Response httpResponse = createSuccessfulResponse(serverToAgent); + 
requestSender.enqueueResponse(httpResponse); + + httpRequestService.sendRequest(); + + verifySingleRequestSent(); + verifyRequestSuccessCallback(serverToAgent); + verify(callback).onConnectionSuccess(); + } + + @Test + void verifyWhenSendingOnDemandRequest_andDelayChanges() { + // Initial state + assertThat(assertAndGetSingleCurrentTask().getDelay()).isEqualTo(REGULAR_DELAY); + + // Trigger delay strategy change + requestSender.enqueueResponse(createFailedResponse(503)); + httpRequestService.sendRequest(); + + // Expected state + assertThat(assertAndGetSingleCurrentTask().getDelay()).isEqualTo(RETRY_DELAY); + } + + @Test + void verifySendingRequest_whenTheresAParsingError() { + HttpSender.Response httpResponse = createSuccessfulResponse(new byte[] {1, 2, 3}); + requestSender.enqueueResponse(httpResponse); + + httpRequestService.sendRequest(); + + verifySingleRequestSent(); + verify(callback).onConnectionFailed(any()); + } + + @Test + void verifySendingRequest_whenThereIsAnExecutionError() + throws ExecutionException, InterruptedException, TimeoutException { + CompletableFuture future = mock(); + requestSender.enqueueResponseFuture(future); + Exception myException = mock(); + doThrow(new ExecutionException(myException)).when(future).get(30, TimeUnit.SECONDS); + + httpRequestService.sendRequest(); + + verifySingleRequestSent(); + verify(callback).onConnectionFailed(myException); + } + + @Test + void verifySendingRequest_whenThereIsAnInterruptedException() + throws ExecutionException, InterruptedException, TimeoutException { + CompletableFuture future = mock(); + requestSender.enqueueResponseFuture(future); + InterruptedException myException = mock(); + doThrow(myException).when(future).get(30, TimeUnit.SECONDS); + + httpRequestService.sendRequest(); + + verifySingleRequestSent(); + verify(callback).onConnectionFailed(myException); + } + + @Test + void verifySendingRequest_whenThereIsAGenericHttpError() { + requestSender.enqueueResponse(createFailedResponse(500)); + + 
httpRequestService.sendRequest(); + + verifySingleRequestSent(); + verifyRequestFailedCallback(500); + } + + @Test + void verifySendingRequest_whenThereIsATooManyRequestsError() { + verifyRetryDelayOnError(createFailedResponse(429), RETRY_DELAY); + } + + @Test + void verifySendingRequest_whenThereIsATooManyRequestsError_withSuggestedDelay() { + HttpSender.Response response = createFailedResponse(429); + when(response.getHeader("Retry-After")).thenReturn("5"); + + verifyRetryDelayOnError(response, Duration.ofSeconds(5)); + } + + @Test + void verifySendingRequest_whenServerProvidesRetryInfo() { + long nanosecondsToWaitForRetry = 1000; + ServerErrorResponse errorResponse = + new ServerErrorResponse.Builder() + .type(ServerErrorResponseType.ServerErrorResponseType_Unavailable) + .retry_info( + new RetryInfo.Builder().retry_after_nanoseconds(nanosecondsToWaitForRetry).build()) + .build(); + ServerToAgent serverToAgent = new ServerToAgent.Builder().error_response(errorResponse).build(); + HttpSender.Response response = createSuccessfulResponse(serverToAgent); + + verifyRetryDelayOnError(response, Duration.ofNanos(nanosecondsToWaitForRetry)); + } + + @Test + void verifySendingRequest_whenServerIsUnavailable() { + ServerErrorResponse errorResponse = + new ServerErrorResponse.Builder() + .type(ServerErrorResponseType.ServerErrorResponseType_Unavailable) + .build(); + ServerToAgent serverToAgent = new ServerToAgent.Builder().error_response(errorResponse).build(); + HttpSender.Response response = createSuccessfulResponse(serverToAgent); + + verifyRetryDelayOnError(response, RETRY_DELAY); + } + + @Test + void verifySendingRequest_whenThereIsAServiceUnavailableError() { + verifyRetryDelayOnError(createFailedResponse(503), RETRY_DELAY); + } + + @Test + void verifySendingRequest_whenThereIsAServiceUnavailableError_withSuggestedDelay() { + HttpSender.Response response = createFailedResponse(503); + when(response.getHeader("Retry-After")).thenReturn("2"); + + 
verifyRetryDelayOnError(response, Duration.ofSeconds(2)); + } + + @Test + void verifySendingRequest_duringRegularMode() { + requestSender.enqueueResponse(createSuccessfulResponse(new ServerToAgent.Builder().build())); + + httpRequestService.sendRequest(); + + verifySingleRequestSent(); + } + + private void verifyRetryDelayOnError( + HttpSender.Response errorResponse, Duration expectedRetryDelay) { + requestSender.enqueueResponse(errorResponse); + TestScheduler.Task previousTask = assertAndGetSingleCurrentTask(); + + previousTask.run(); + + verifySingleRequestSent(); + verify(periodicRetryDelay).reset(); + verify(callback).onRequestFailed(any()); + TestScheduler.Task retryTask = assertAndGetSingleCurrentTask(); + assertThat(retryTask.getDelay()).isEqualTo(expectedRetryDelay); + + // Retry with another error + clearInvocations(callback); + scheduler.clearTasks(); + requestSender.enqueueResponse(createFailedResponse(500)); + retryTask.run(); + + verifySingleRequestSent(); + verify(callback).onRequestFailed(any()); + TestScheduler.Task retryTask2 = assertAndGetSingleCurrentTask(); + assertThat(retryTask2.getDelay()).isEqualTo(expectedRetryDelay); + + // Retry with a success + clearInvocations(callback); + scheduler.clearTasks(); + ServerToAgent serverToAgent = new ServerToAgent.Builder().build(); + requestSender.enqueueResponse(createSuccessfulResponse(serverToAgent)); + retryTask2.run(); + + verify(periodicRequestDelay).reset(); + verifySingleRequestSent(); + verifyRequestSuccessCallback(serverToAgent); + assertThat(assertAndGetSingleCurrentTask().getDelay()).isEqualTo(REGULAR_DELAY); + } + + private Request createRequest() { + AgentToServer agentToServer = new AgentToServer.Builder().sequence_num(10).build(); + requestSize = agentToServer.encodeByteString().size(); + return Request.create(agentToServer); + } + + private TestScheduler.Task assertAndGetSingleCurrentTask() { + List scheduledTasks = scheduler.getScheduledTasks(); + assertThat(scheduledTasks).hasSize(1); 
+ return scheduledTasks.get(0); + } + + private void verifySingleRequestSent() { + List requests = requestSender.getRequests(1); + assertThat(requests.get(0).contentLength).isEqualTo(requestSize); + } + + private void verifyRequestSuccessCallback(ServerToAgent serverToAgent) { + verify(callback).onRequestSuccess(Response.create(serverToAgent)); + } + + private void verifyRequestFailedCallback(int errorCode) { + ArgumentCaptor captor = ArgumentCaptor.forClass(HttpErrorException.class); + verify(callback).onRequestFailed(captor.capture()); + assertThat(captor.getValue().getErrorCode()).isEqualTo(errorCode); + assertThat(captor.getValue().getMessage()).isEqualTo("Error message"); + } + + private static HttpSender.Response createSuccessfulResponse(ServerToAgent serverToAgent) { + return createSuccessfulResponse(serverToAgent.encodeByteString().toByteArray()); + } + + private static HttpSender.Response createSuccessfulResponse(byte[] serverToAgent) { + HttpSender.Response response = mock(); + ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(serverToAgent); + when(response.statusCode()).thenReturn(200); + when(response.bodyInputStream()).thenReturn(byteArrayInputStream); + return response; + } + + private static HttpSender.Response createFailedResponse(int statusCode) { + HttpSender.Response response = mock(); + when(response.statusCode()).thenReturn(statusCode); + when(response.statusMessage()).thenReturn("Error message"); + return response; + } + + private static PeriodicDelay createPeriodicDelay(Duration delay) { + PeriodicDelay mock = mock(); + when(mock.getNextDelay()).thenReturn(delay); + return mock; + } + + private static PeriodicDelayWithSuggestion createPeriodicDelayWithSuggestionSupport( + Duration delay) { + return spy(new PeriodicDelayWithSuggestion(delay)); + } + + private static class TestHttpSender implements HttpSender { + private final List requests = new ArrayList<>(); + + @SuppressWarnings("JdkObsolete") + private final Queue> 
responses = new LinkedList<>(); + + @Override + public CompletableFuture send(BodyWriter writer, int contentLength) { + requests.add(new RequestParams(contentLength)); + CompletableFuture response = null; + try { + response = responses.remove(); + } catch (NoSuchElementException e) { + fail("Unwanted triggered request"); + } + return response; + } + + void enqueueResponse(HttpSender.Response response) { + enqueueResponseFuture(CompletableFuture.completedFuture(response)); + } + + void enqueueResponseFuture(CompletableFuture future) { + responses.add(future); + } + + List getRequests(int size) { + assertThat(requests).hasSize(size); + List immutableRequests = + Collections.unmodifiableList(new ArrayList<>(requests)); + requests.clear(); + return immutableRequests; + } + + private static class RequestParams { + final int contentLength; + + private RequestParams(int contentLength) { + this.contentLength = contentLength; + } + } + } +} diff --git a/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/request/service/PeriodicDelayWithSuggestion.java b/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/request/service/PeriodicDelayWithSuggestion.java new file mode 100644 index 000000000..071067611 --- /dev/null +++ b/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/request/service/PeriodicDelayWithSuggestion.java @@ -0,0 +1,35 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.request.service; + +import io.opentelemetry.opamp.client.internal.request.delay.AcceptsDelaySuggestion; +import io.opentelemetry.opamp.client.internal.request.delay.PeriodicDelay; +import java.time.Duration; + +public class PeriodicDelayWithSuggestion implements PeriodicDelay, AcceptsDelaySuggestion { + private final Duration initialDelay; + private Duration currentDelay; + + public PeriodicDelayWithSuggestion(Duration initialDelay) { + this.initialDelay = 
initialDelay; + currentDelay = initialDelay; + } + + @Override + public void suggestDelay(Duration delay) { + currentDelay = delay; + } + + @Override + public Duration getNextDelay() { + return currentDelay; + } + + @Override + public void reset() { + currentDelay = initialDelay; + } +} diff --git a/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/request/service/TestScheduler.java b/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/request/service/TestScheduler.java new file mode 100644 index 000000000..18379a421 --- /dev/null +++ b/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/request/service/TestScheduler.java @@ -0,0 +1,120 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.opamp.client.internal.request.service; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.lenient; +import static org.mockito.Mockito.mock; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.Delayed; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import org.jetbrains.annotations.NotNull; + +public final class TestScheduler { + private final List tasks = new ArrayList<>(); + private final ScheduledExecutorService service = createTestScheduleExecutorService(); + + public ScheduledExecutorService getMockService() { + return service; + } + + public List getScheduledTasks() { + return Collections.unmodifiableList(tasks); + } + + public void clearTasks() { + tasks.clear(); + } + + private ScheduledExecutorService createTestScheduleExecutorService() { + ScheduledExecutorService service = mock(); + + lenient() + .doAnswer( + invocation -> { + Runnable runnable = invocation.getArgument(0); + runnable.run(); + 
return null; + }) + .when(service) + .execute(any()); + + lenient() + .when(service.schedule(any(Runnable.class), anyLong(), any(TimeUnit.class))) + .thenAnswer( + invocation -> { + Task task = new Task(invocation.getArgument(0), invocation.getArgument(1)); + + tasks.add(task); + + return task; + }); + + return service; + } + + public class Task implements ScheduledFuture { + private final Runnable runnable; + private final Duration delay; + + public void run() { + get(); + } + + private Task(Runnable runnable, long timeNanos) { + this.runnable = runnable; + this.delay = Duration.ofNanos(timeNanos); + } + + public Duration getDelay() { + return delay; + } + + @Override + public long getDelay(@NotNull TimeUnit unit) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + return tasks.remove(this); + } + + @Override + public boolean isCancelled() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isDone() { + throw new UnsupportedOperationException(); + } + + @Override + public Object get() { + tasks.remove(this); + runnable.run(); + return null; + } + + @Override + public Object get(long timeout, @NotNull TimeUnit unit) { + throw new UnsupportedOperationException(); + } + + @Override + public int compareTo(@NotNull Delayed o) { + throw new UnsupportedOperationException(); + } + } +} diff --git a/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/request/service/WebSocketRequestServiceTest.java b/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/request/service/WebSocketRequestServiceTest.java new file mode 100644 index 000000000..6e96554ea --- /dev/null +++ b/opamp-client/src/test/java/io/opentelemetry/opamp/client/internal/request/service/WebSocketRequestServiceTest.java @@ -0,0 +1,337 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package 
io.opentelemetry.opamp.client.internal.request.service; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.clearInvocations; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.lenient; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +import com.google.protobuf.CodedOutputStream; +import io.opentelemetry.opamp.client.internal.connectivity.websocket.WebSocket; +import io.opentelemetry.opamp.client.internal.request.Request; +import io.opentelemetry.opamp.client.internal.response.OpampServerResponseException; +import io.opentelemetry.opamp.client.internal.response.Response; +import io.opentelemetry.opamp.client.request.service.RequestService; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.time.Duration; +import opamp.proto.AgentToServer; +import opamp.proto.RetryInfo; +import opamp.proto.ServerErrorResponse; +import opamp.proto.ServerErrorResponseType; +import opamp.proto.ServerToAgent; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.ArgumentCaptor; +import org.mockito.InOrder; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +class WebSocketRequestServiceTest { + @Mock private WebSocket webSocket; + @Mock private RequestService.Callback callback; + @Mock private PeriodicDelayWithSuggestion retryDelay; + private Request request; + private TestScheduler scheduler; + private WebSocketRequestService requestService; + private static final Duration INITIAL_RETRY_DELAY 
= Duration.ofSeconds(1); + + @BeforeEach + void setUp() { + lenient().when(retryDelay.getNextDelay()).thenReturn(INITIAL_RETRY_DELAY); + scheduler = new TestScheduler(); + requestService = new WebSocketRequestService(webSocket, retryDelay, scheduler.getMockService()); + } + + @Test + void verifySuccessfulStart() { + startService(); + verify(webSocket).open(requestService); + + // When opening successfully, notify callback + requestService.onOpen(); + verify(callback).onConnectionSuccess(); + verifyNoMoreInteractions(callback); + + // It shouldn't allow starting again + try { + startService(); + fail(); + } catch (IllegalStateException e) { + assertThat(e).hasMessage("The service has already started"); + } + } + + @Test + void verifyFailedStart() { + startService(); + verify(webSocket).open(requestService); + + // When failing while opening, notify callback + Throwable t = mock(); + requestService.onFailure(t); + verify(retryDelay).reset(); + verify(callback).onConnectionFailed(t); + verifyNoMoreInteractions(callback); + + // Check connection retry is scheduled + assertThat(scheduler.getScheduledTasks()).hasSize(1); + assertThat(scheduler.getScheduledTasks().get(0).getDelay()).isEqualTo(INITIAL_RETRY_DELAY); + + // It shouldn't allow starting again + try { + startService(); + fail(); + } catch (IllegalStateException e) { + assertThat(e).hasMessage("The service has already started"); + } + + // It shouldn't schedule more than one retry at a time + clearInvocations(retryDelay, callback); + requestService.onFailure(t); + verify(callback).onConnectionFailed(t); + verifyNoInteractions(retryDelay); + verifyNoMoreInteractions(callback); + assertThat(scheduler.getScheduledTasks()).hasSize(1); + + // Execute retry with new delay + clearInvocations(webSocket, callback); + when(retryDelay.getNextDelay()).thenReturn(Duration.ofSeconds(5)); + scheduler.getScheduledTasks().get(0).run(); + assertThat(scheduler.getScheduledTasks()).isEmpty(); + 
verify(webSocket).open(requestService); + + // Fail again + requestService.onFailure(t); + verify(retryDelay, never()).reset(); + verify(callback).onConnectionFailed(t); + + // A new retry has been scheduled + assertThat(scheduler.getScheduledTasks()).hasSize(1); + assertThat(scheduler.getScheduledTasks().get(0).getDelay()).isEqualTo(Duration.ofSeconds(5)); + + // Execute retry again + clearInvocations(webSocket, callback); + scheduler.getScheduledTasks().get(0).run(); + assertThat(scheduler.getScheduledTasks()).isEmpty(); + verify(webSocket).open(requestService); + + // Succeed + requestService.onOpen(); + verify(callback).onConnectionSuccess(); + verifyNoMoreInteractions(callback); + + // Fail at some point + clearInvocations(callback); + requestService.onFailure(t); + verify(callback).onConnectionFailed(t); + verifyNoMoreInteractions(callback); + verify(retryDelay).reset(); + assertThat(scheduler.getScheduledTasks()).hasSize(1); + } + + @Test + void verifySendRequest() { + // Validate when not running + try { + requestService.sendRequest(); + fail(); + } catch (IllegalStateException e) { + assertThat(e).hasMessage("The service is not running"); + } + + startService(); + + // Successful send + when(webSocket.send(any())).thenReturn(true); + requestService.sendRequest(); + verify(webSocket).send(getExpectedOutgoingBytes()); + + // Check there are no pending requests + clearInvocations(webSocket); + requestService.onOpen(); + verifyNoInteractions(webSocket); + + // Failed send + when(webSocket.send(any())).thenReturn(false); + requestService.sendRequest(); + clearInvocations(webSocket); + + // Check pending request + when(webSocket.send(any())).thenReturn(true); + requestService.onOpen(); + verify(webSocket).send(getExpectedOutgoingBytes()); + } + + @Test + void verifyOnMessage() { + startService(); + + // Successful message + ServerToAgent serverToAgent = new ServerToAgent.Builder().build(); + requestService.onMessage(createServerToAgentPayload(serverToAgent)); + 
verify(callback).onRequestSuccess(Response.create(serverToAgent)); + verifyNoMoreInteractions(callback); + assertThat(scheduler.getScheduledTasks()).isEmpty(); + + // Regular error message + ArgumentCaptor throwableCaptor = ArgumentCaptor.forClass(Throwable.class); + clearInvocations(callback); + serverToAgent = + new ServerToAgent.Builder() + .error_response(new ServerErrorResponse.Builder().error_message("A message").build()) + .build(); + requestService.onMessage(createServerToAgentPayload(serverToAgent)); + verify(callback).onRequestFailed(throwableCaptor.capture()); + verifyNoMoreInteractions(callback); + OpampServerResponseException error = (OpampServerResponseException) throwableCaptor.getValue(); + assertThat(error.getMessage()).isEqualTo("A message"); + assertThat(scheduler.getScheduledTasks()).isEmpty(); + + // Error message with unavailable status + clearInvocations(callback); + serverToAgent = + new ServerToAgent.Builder() + .error_response( + new ServerErrorResponse.Builder() + .type(ServerErrorResponseType.ServerErrorResponseType_Unavailable) + .error_message("Try later") + .build()) + .build(); + requestService.onMessage(createServerToAgentPayload(serverToAgent)); + verify(callback).onRequestFailed(throwableCaptor.capture()); + verifyNoMoreInteractions(callback); + OpampServerResponseException unavailableError = + (OpampServerResponseException) throwableCaptor.getValue(); + assertThat(unavailableError.getMessage()).isEqualTo("Try later"); + assertThat(scheduler.getScheduledTasks()).hasSize(1); + verify(retryDelay, never()).suggestDelay(any()); + + // Reset scheduled retry + scheduler.getScheduledTasks().get(0).run(); + requestService.onOpen(); + + // Error message with unavailable status and suggested delay + Duration suggestedDelay = Duration.ofSeconds(10); + clearInvocations(callback, retryDelay); + serverToAgent = + new ServerToAgent.Builder() + .error_response( + new ServerErrorResponse.Builder() + 
.type(ServerErrorResponseType.ServerErrorResponseType_Unavailable) + .retry_info( + new RetryInfo.Builder() + .retry_after_nanoseconds(suggestedDelay.toNanos()) + .build()) + .build()) + .build(); + requestService.onMessage(createServerToAgentPayload(serverToAgent)); + verify(callback).onRequestFailed(throwableCaptor.capture()); + verifyNoMoreInteractions(callback); + OpampServerResponseException unavailableErrorWithSuggestedDelay = + (OpampServerResponseException) throwableCaptor.getValue(); + assertThat(unavailableErrorWithSuggestedDelay.getMessage()).isEmpty(); + assertThat(scheduler.getScheduledTasks()).hasSize(1); + verify(retryDelay).suggestDelay(suggestedDelay); + } + + @Test + void verifyStop() { + startService(); + + requestService.stop(); + + InOrder inOrder = inOrder(webSocket); + inOrder.verify(webSocket).send(getExpectedOutgoingBytes()); + inOrder.verify(webSocket).close(1000, null); + verify(scheduler.getMockService()).shutdown(); + + // If something fails afterward, no retry should get scheduled. + requestService.onFailure(mock()); + verifyNoInteractions(retryDelay); + assertThat(scheduler.getScheduledTasks()).isEmpty(); + + // If onClosed is called afterward, no retry should get scheduled. + requestService.onClosed(); + verifyNoInteractions(retryDelay); + assertThat(scheduler.getScheduledTasks()).isEmpty(); + + // If a new message with a server unavailable error arrives afterward, no retry should get + // scheduled. + ServerToAgent serverToAgent = + new ServerToAgent.Builder() + .error_response( + new ServerErrorResponse.Builder() + .type(ServerErrorResponseType.ServerErrorResponseType_Unavailable) + .build()) + .build(); + requestService.onMessage(createServerToAgentPayload(serverToAgent)); + verifyNoInteractions(retryDelay); + assertThat(scheduler.getScheduledTasks()).isEmpty(); + + // Requests cannot get enqueued afterward. 
+ try { + requestService.sendRequest(); + fail(); + } catch (IllegalStateException e) { + assertThat(e).hasMessage("This service is already stopped"); + } + + // The service cannot get restarted afterward. + try { + startService(); + fail(); + } catch (IllegalStateException e) { + assertThat(e).hasMessage("This service is already stopped"); + } + } + + private byte[] getExpectedOutgoingBytes() { + try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) { + CodedOutputStream codedOutput = CodedOutputStream.newInstance(outputStream); + codedOutput.writeUInt64NoTag(0); + byte[] payload = request.getAgentToServer().encode(); + codedOutput.writeRawBytes(payload); + codedOutput.flush(); + return outputStream.toByteArray(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private static byte[] createServerToAgentPayload(ServerToAgent serverToAgent) { + try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) { + CodedOutputStream codedOutput = CodedOutputStream.newInstance(outputStream); + codedOutput.writeUInt64NoTag(0); + codedOutput.writeRawBytes(serverToAgent.encode()); + codedOutput.flush(); + return outputStream.toByteArray(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private void startService() { + requestService.start(callback, this::createRequest); + } + + private Request createRequest() { + AgentToServer agentToServer = new AgentToServer.Builder().sequence_num(10).build(); + request = Request.create(agentToServer); + return request; + } +} diff --git a/processors/src/main/java/io/opentelemetry/contrib/filter/FilteringLogRecordProcessor.java b/processors/src/main/java/io/opentelemetry/contrib/filter/FilteringLogRecordProcessor.java index a213ec21a..61347388a 100644 --- a/processors/src/main/java/io/opentelemetry/contrib/filter/FilteringLogRecordProcessor.java +++ b/processors/src/main/java/io/opentelemetry/contrib/filter/FilteringLogRecordProcessor.java @@ -11,10 +11,10 @@ import 
io.opentelemetry.sdk.logs.data.LogRecordData; import java.util.function.Predicate; -public class FilteringLogRecordProcessor implements LogRecordProcessor { +public final class FilteringLogRecordProcessor implements LogRecordProcessor { - public final LogRecordProcessor delegate; - public final Predicate predicate; + private final LogRecordProcessor delegate; + private final Predicate predicate; public FilteringLogRecordProcessor( LogRecordProcessor delegate, Predicate predicate) { diff --git a/processors/src/test/java/io/opentelemetry/contrib/eventbridge/internal/EventToSpanBridgeComponentProviderTest.java b/processors/src/test/java/io/opentelemetry/contrib/eventbridge/internal/EventToSpanBridgeComponentProviderTest.java index a2f6165ae..1e503a51c 100644 --- a/processors/src/test/java/io/opentelemetry/contrib/eventbridge/internal/EventToSpanBridgeComponentProviderTest.java +++ b/processors/src/test/java/io/opentelemetry/contrib/eventbridge/internal/EventToSpanBridgeComponentProviderTest.java @@ -18,7 +18,7 @@ class EventToSpanBridgeComponentProviderTest { @Test void endToEnd() { String yaml = - "file_format: 0.3\n" + "file_format: 1.0-rc.1\n" + "logger_provider:\n" + " processors:\n" + " - event_to_span_event_bridge:\n"; diff --git a/processors/src/test/java/io/opentelemetry/contrib/filter/FilteringLogRecordProcessorTest.java b/processors/src/test/java/io/opentelemetry/contrib/filter/FilteringLogRecordProcessorTest.java index 50405d454..05bee8aa8 100644 --- a/processors/src/test/java/io/opentelemetry/contrib/filter/FilteringLogRecordProcessorTest.java +++ b/processors/src/test/java/io/opentelemetry/contrib/filter/FilteringLogRecordProcessorTest.java @@ -5,7 +5,7 @@ package io.opentelemetry.contrib.filter; -import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; import io.opentelemetry.api.logs.Logger; import io.opentelemetry.api.trace.Span; @@ -31,7 +31,7 @@ import org.junit.jupiter.api.BeforeEach; 
import org.junit.jupiter.api.Test; -public class FilteringLogRecordProcessorTest { +class FilteringLogRecordProcessorTest { private final InMemoryLogRecordExporter memoryLogRecordExporter = InMemoryLogRecordExporter.create(); @@ -79,7 +79,7 @@ public SdkLoggerProviderBuilder apply( logRecordData -> { SpanContext spanContext = logRecordData.getSpanContext(); return spanContext.isSampled(); - }) {}) + })) .build() .get("TestScope"); } @@ -93,14 +93,14 @@ void verifyLogFilteringExistSpanContext() { sdk.getLogsBridge().get("test").logRecordBuilder().setBody("One Log").emit(); List finishedLogRecordItems = memoryLogRecordExporter.getFinishedLogRecordItems(); - assertEquals(1, finishedLogRecordItems.size()); + assertThat(finishedLogRecordItems.size()).isEqualTo(1); try (Scope scope = span.makeCurrent()) { } finally { span.end(); } List finishedSpans = spansExporter.getFinishedSpanItems(); - assertEquals(1, finishedSpans.size()); + assertThat(finishedSpans.size()).isEqualTo(1); } } @@ -109,6 +109,6 @@ void verifyFilteringNotExitSpanContext() { logger.logRecordBuilder().setBody("One Log").emit(); List finishedLogRecordItems = memoryLogRecordExporter.getFinishedLogRecordItems(); - assertEquals(0, finishedLogRecordItems.size()); + assertThat(finishedLogRecordItems.size()).isEqualTo(0); } } diff --git a/processors/src/test/java/io/opentelemetry/contrib/interceptor/InterceptableLogRecordExporterTest.java b/processors/src/test/java/io/opentelemetry/contrib/interceptor/InterceptableLogRecordExporterTest.java index 3b81ce277..0096caa66 100644 --- a/processors/src/test/java/io/opentelemetry/contrib/interceptor/InterceptableLogRecordExporterTest.java +++ b/processors/src/test/java/io/opentelemetry/contrib/interceptor/InterceptableLogRecordExporterTest.java @@ -5,7 +5,7 @@ package io.opentelemetry.contrib.interceptor; -import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; import 
io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.common.Attributes; @@ -62,13 +62,13 @@ void verifyLogModification() { List finishedLogRecordItems = memoryLogRecordExporter.getFinishedLogRecordItems(); - assertEquals(1, finishedLogRecordItems.size()); + assertThat(finishedLogRecordItems.size()).isEqualTo(1); LogRecordData logRecordData = finishedLogRecordItems.get(0); - assertEquals(2, logRecordData.getAttributes().size()); - assertEquals( - "from interceptor", - logRecordData.getAttributes().get(AttributeKey.stringKey("global.attr"))); - assertEquals("local", logRecordData.getAttributes().get(AttributeKey.stringKey("local.attr"))); + assertThat(logRecordData.getAttributes().size()).isEqualTo(2); + assertThat(logRecordData.getAttributes().get(AttributeKey.stringKey("global.attr"))) + .isEqualTo("from interceptor"); + assertThat(logRecordData.getAttributes().get(AttributeKey.stringKey("local.attr"))) + .isEqualTo("local"); } @Test @@ -87,9 +87,9 @@ void verifyLogFiltering() { List finishedLogRecordItems = memoryLogRecordExporter.getFinishedLogRecordItems(); - assertEquals(2, finishedLogRecordItems.size()); - assertEquals(Value.of("One log"), finishedLogRecordItems.get(0).getBodyValue()); - assertEquals(Value.of("Another log"), finishedLogRecordItems.get(1).getBodyValue()); + assertThat(finishedLogRecordItems.size()).isEqualTo(2); + assertThat(finishedLogRecordItems.get(0).getBodyValue()).isEqualTo(Value.of("One log")); + assertThat(finishedLogRecordItems.get(1).getBodyValue()).isEqualTo(Value.of("Another log")); } private static class ModifiableLogRecordData implements LogRecordData { diff --git a/processors/src/test/java/io/opentelemetry/contrib/interceptor/InterceptableMetricExporterTest.java b/processors/src/test/java/io/opentelemetry/contrib/interceptor/InterceptableMetricExporterTest.java index 6b12d5f1a..f321b8a7b 100644 --- a/processors/src/test/java/io/opentelemetry/contrib/interceptor/InterceptableMetricExporterTest.java +++ 
b/processors/src/test/java/io/opentelemetry/contrib/interceptor/InterceptableMetricExporterTest.java @@ -6,7 +6,6 @@ package io.opentelemetry.contrib.interceptor; import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertEquals; import io.opentelemetry.api.metrics.Meter; import io.opentelemetry.contrib.interceptor.common.ComposableInterceptor; @@ -55,8 +54,8 @@ void verifyMetricModification() { meterProvider.forceFlush(); List finishedMetricItems = memoryMetricExporter.getFinishedMetricItems(); - assertEquals(1, finishedMetricItems.size()); - assertEquals("ModifiedName", finishedMetricItems.get(0).getName()); + assertThat(finishedMetricItems.size()).isEqualTo(1); + assertThat(finishedMetricItems.get(0).getName()).isEqualTo("ModifiedName"); } @Test @@ -75,7 +74,7 @@ void verifyMetricFiltering() { meterProvider.forceFlush(); List finishedMetricItems = memoryMetricExporter.getFinishedMetricItems(); - assertEquals(2, finishedMetricItems.size()); + assertThat(finishedMetricItems.size()).isEqualTo(2); List names = new ArrayList<>(); for (MetricData item : finishedMetricItems) { names.add(item.getName()); diff --git a/processors/src/test/java/io/opentelemetry/contrib/interceptor/InterceptableSpanExporterTest.java b/processors/src/test/java/io/opentelemetry/contrib/interceptor/InterceptableSpanExporterTest.java index 35c3afaaa..26242174f 100644 --- a/processors/src/test/java/io/opentelemetry/contrib/interceptor/InterceptableSpanExporterTest.java +++ b/processors/src/test/java/io/opentelemetry/contrib/interceptor/InterceptableSpanExporterTest.java @@ -5,7 +5,7 @@ package io.opentelemetry.contrib.interceptor; -import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.common.Attributes; @@ -51,12 +51,12 @@ void verifySpanModification() { tracer.spanBuilder("Test 
span").setAttribute("local.attr", 10).startSpan().end(); List finishedSpanItems = memorySpanExporter.getFinishedSpanItems(); - assertEquals(1, finishedSpanItems.size()); + assertThat(finishedSpanItems.size()).isEqualTo(1); SpanData spanData = finishedSpanItems.get(0); - assertEquals(2, spanData.getAttributes().size()); - assertEquals( - "from interceptor", spanData.getAttributes().get(AttributeKey.stringKey("global.attr"))); - assertEquals(10, spanData.getAttributes().get(AttributeKey.longKey("local.attr"))); + assertThat(spanData.getAttributes().size()).isEqualTo(2); + assertThat(spanData.getAttributes().get(AttributeKey.stringKey("global.attr"))) + .isEqualTo("from interceptor"); + assertThat(spanData.getAttributes().get(AttributeKey.longKey("local.attr"))).isEqualTo(10L); } @Test @@ -74,15 +74,15 @@ void verifySpanFiltering() { tracer.spanBuilder("Another span").startSpan().end(); List finishedSpanItems = memorySpanExporter.getFinishedSpanItems(); - assertEquals(2, finishedSpanItems.size()); - assertEquals("One span", finishedSpanItems.get(0).getName()); - assertEquals("Another span", finishedSpanItems.get(1).getName()); + assertThat(finishedSpanItems.size()).isEqualTo(2); + assertThat(finishedSpanItems.get(0).getName()).isEqualTo("One span"); + assertThat(finishedSpanItems.get(1).getName()).isEqualTo("Another span"); } private static class ModifiableSpanData extends DelegatingSpanData { private final AttributesBuilder attributes = Attributes.builder(); - protected ModifiableSpanData(SpanData delegate) { + ModifiableSpanData(SpanData delegate) { super(delegate); } diff --git a/prometheus-client-bridge/src/main/java/io/opentelemetry/contrib/metrics/prometheus/clientbridge/MetricAdapter.java b/prometheus-client-bridge/src/main/java/io/opentelemetry/contrib/metrics/prometheus/clientbridge/MetricAdapter.java index c0b381e7d..e71331964 100644 --- a/prometheus-client-bridge/src/main/java/io/opentelemetry/contrib/metrics/prometheus/clientbridge/MetricAdapter.java +++ 
b/prometheus-client-bridge/src/main/java/io/opentelemetry/contrib/metrics/prometheus/clientbridge/MetricAdapter.java @@ -6,6 +6,8 @@ package io.opentelemetry.contrib.metrics.prometheus.clientbridge; import static io.prometheus.client.Collector.doubleToGoString; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.NANOSECONDS; import io.opentelemetry.api.common.Attributes; import io.opentelemetry.api.trace.SpanContext; @@ -28,7 +30,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; -import java.util.concurrent.TimeUnit; import java.util.function.Function; import javax.annotation.Nullable; @@ -54,6 +55,8 @@ final class MetricAdapter { static final String LABEL_NAME_QUANTILE = "quantile"; static final String LABEL_NAME_LE = "le"; + static final Function sanitizer = new NameSanitizer(); + // Converts a MetricData to a Prometheus MetricFamilySamples. static MetricFamilySamples toMetricFamilySamples(MetricData metricData) { String cleanMetricName = cleanMetricName(metricData.getName()); @@ -99,8 +102,6 @@ static Collector.Type toMetricFamilyType(MetricData metricData) { return Collector.Type.UNKNOWN; } - static final Function sanitizer = new NameSanitizer(); - // Converts a list of points from MetricData to a list of Prometheus Samples. 
static List toSamples( String name, MetricDataType type, Collection points) { @@ -291,7 +292,7 @@ private static Sample createSample( labelValues, value, toPrometheusExemplar(exemplar), - TimeUnit.MILLISECONDS.convert(timestampNanos, TimeUnit.NANOSECONDS)); + MILLISECONDS.convert(timestampNanos, NANOSECONDS)); } return new Sample( name, @@ -299,7 +300,7 @@ private static Sample createSample( labelValues, value, null, - TimeUnit.MILLISECONDS.convert(timestampNanos, TimeUnit.NANOSECONDS)); + MILLISECONDS.convert(timestampNanos, NANOSECONDS)); } private static io.prometheus.client.exemplars.Exemplar toPrometheusExemplar( @@ -309,7 +310,7 @@ private static io.prometheus.client.exemplars.Exemplar toPrometheusExemplar( return new io.prometheus.client.exemplars.Exemplar( getExemplarValue(exemplar), // Convert to ms for prometheus, truncate nanosecond precision. - TimeUnit.NANOSECONDS.toMillis(exemplar.getEpochNanos()), + NANOSECONDS.toMillis(exemplar.getEpochNanos()), "trace_id", spanContext.getTraceId(), "span_id", diff --git a/prometheus-client-bridge/src/main/java/io/opentelemetry/contrib/metrics/prometheus/clientbridge/PrometheusCollector.java b/prometheus-client-bridge/src/main/java/io/opentelemetry/contrib/metrics/prometheus/clientbridge/PrometheusCollector.java index 3acc1096a..e9c574f92 100644 --- a/prometheus-client-bridge/src/main/java/io/opentelemetry/contrib/metrics/prometheus/clientbridge/PrometheusCollector.java +++ b/prometheus-client-bridge/src/main/java/io/opentelemetry/contrib/metrics/prometheus/clientbridge/PrometheusCollector.java @@ -5,6 +5,8 @@ package io.opentelemetry.contrib.metrics.prometheus.clientbridge; +import static java.util.Collections.unmodifiableList; + import io.opentelemetry.sdk.common.CompletableResultCode; import io.opentelemetry.sdk.metrics.InstrumentType; import io.opentelemetry.sdk.metrics.data.AggregationTemporality; @@ -15,7 +17,6 @@ import io.prometheus.client.CollectorRegistry; import java.util.ArrayList; import 
java.util.Collection; -import java.util.Collections; import java.util.List; import java.util.function.Supplier; @@ -79,7 +80,7 @@ public List collect() { for (MetricData metricData : allMetrics) { allSamples.add(MetricAdapter.toMetricFamilySamples(metricData)); } - return Collections.unmodifiableList(allSamples); + return unmodifiableList(allSamples); } } } diff --git a/prometheus-client-bridge/src/main/java/io/opentelemetry/contrib/metrics/prometheus/clientbridge/Serializer.java b/prometheus-client-bridge/src/main/java/io/opentelemetry/contrib/metrics/prometheus/clientbridge/Serializer.java index d696a14b7..d79892a27 100644 --- a/prometheus-client-bridge/src/main/java/io/opentelemetry/contrib/metrics/prometheus/clientbridge/Serializer.java +++ b/prometheus-client-bridge/src/main/java/io/opentelemetry/contrib/metrics/prometheus/clientbridge/Serializer.java @@ -21,11 +21,12 @@ package io.opentelemetry.contrib.metrics.prometheus.clientbridge; +import static java.util.Collections.emptyList; + import io.opentelemetry.sdk.metrics.data.HistogramPointData; import io.opentelemetry.sdk.metrics.data.MetricData; import io.opentelemetry.sdk.metrics.data.PointData; import java.util.Collection; -import java.util.Collections; import java.util.List; /** Serializes metrics into Prometheus exposition formats. 
*/ @@ -71,7 +72,7 @@ static Collection getPoints(MetricData metricData) { case EXPONENTIAL_HISTOGRAM: return metricData.getExponentialHistogramData().getPoints(); } - return Collections.emptyList(); + return emptyList(); } private Serializer() {} diff --git a/resource-providers/build.gradle.kts b/resource-providers/build.gradle.kts index 1dc20be51..4dc0a28d5 100644 --- a/resource-providers/build.gradle.kts +++ b/resource-providers/build.gradle.kts @@ -11,6 +11,7 @@ dependencies { compileOnly("com.google.auto.service:auto-service") compileOnly("io.opentelemetry:opentelemetry-api") + compileOnly("io.opentelemetry:opentelemetry-api-incubator") compileOnly("io.opentelemetry:opentelemetry-sdk") compileOnly("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure") compileOnly("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure-spi") @@ -18,4 +19,5 @@ dependencies { testImplementation("io.opentelemetry.semconv:opentelemetry-semconv") testImplementation("com.google.auto.service:auto-service") testImplementation("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure") + testImplementation("io.opentelemetry:opentelemetry-api-incubator") } diff --git a/resource-providers/src/main/java/io/opentelemetry/contrib/resourceproviders/AppServerResourceDetector.java b/resource-providers/src/main/java/io/opentelemetry/contrib/resourceproviders/AppServerResourceDetector.java new file mode 100644 index 000000000..d97785cca --- /dev/null +++ b/resource-providers/src/main/java/io/opentelemetry/contrib/resourceproviders/AppServerResourceDetector.java @@ -0,0 +1,31 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.resourceproviders; + +import com.google.auto.service.AutoService; +import io.opentelemetry.api.incubator.config.DeclarativeConfigProperties; +import io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider; +import io.opentelemetry.sdk.resources.Resource; + 
+@SuppressWarnings("rawtypes") +@AutoService(ComponentProvider.class) +public final class AppServerResourceDetector implements ComponentProvider { + + @Override + public Class getType() { + return Resource.class; + } + + @Override + public String getName() { + return "app_server"; + } + + @Override + public Resource create(DeclarativeConfigProperties config) { + return new AppServerServiceNameProvider().create(); + } +} diff --git a/resource-providers/src/main/java/io/opentelemetry/contrib/resourceproviders/AppServerServiceNameProvider.java b/resource-providers/src/main/java/io/opentelemetry/contrib/resourceproviders/AppServerServiceNameProvider.java index 505dbef84..5ce7196f2 100644 --- a/resource-providers/src/main/java/io/opentelemetry/contrib/resourceproviders/AppServerServiceNameProvider.java +++ b/resource-providers/src/main/java/io/opentelemetry/contrib/resourceproviders/AppServerServiceNameProvider.java @@ -38,6 +38,10 @@ public AppServerServiceNameProvider() { @Override public Resource createResource(ConfigProperties config) { + return create(); + } + + Resource create() { String serviceName = detectServiceName(); if (serviceName == null) { logger.log( diff --git a/resource-providers/src/test/java/io/opentelemetry/contrib/resourceproviders/JettyServiceNameDetectorTest.java b/resource-providers/src/test/java/io/opentelemetry/contrib/resourceproviders/JettyServiceNameDetectorTest.java index b48f4685a..75d5664a2 100644 --- a/resource-providers/src/test/java/io/opentelemetry/contrib/resourceproviders/JettyServiceNameDetectorTest.java +++ b/resource-providers/src/test/java/io/opentelemetry/contrib/resourceproviders/JettyServiceNameDetectorTest.java @@ -6,8 +6,7 @@ package io.opentelemetry.contrib.resourceproviders; import static io.opentelemetry.contrib.resourceproviders.JettyAppServer.parseJettyBase; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNull; +import static 
org.assertj.core.api.Assertions.assertThat; import java.io.IOException; import java.nio.file.Files; @@ -15,21 +14,21 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; -public class JettyServiceNameDetectorTest { +class JettyServiceNameDetectorTest { @Test void testJettyBase(@TempDir Path tempDir) throws IOException { - assertNull(parseJettyBase(null)); - assertNull(parseJettyBase("")); - assertNull(parseJettyBase("jetty.base=")); - assertEquals(tempDir.toString(), parseJettyBase("jetty.base=" + tempDir).toString()); - assertEquals( - tempDir.toString(), parseJettyBase("foo jetty.base=" + tempDir + " bar").toString()); + assertThat(parseJettyBase(null)).isNull(); + assertThat(parseJettyBase("")).isNull(); + assertThat(parseJettyBase("jetty.base=")).isNull(); + assertThat(parseJettyBase("jetty.base=" + tempDir).toString()).isEqualTo(tempDir.toString()); + assertThat(parseJettyBase("foo jetty.base=" + tempDir + " bar").toString()) + .isEqualTo(tempDir.toString()); Path otherDir = tempDir.resolve("jetty test"); Files.createDirectory(otherDir); - assertEquals(otherDir.toString(), parseJettyBase("jetty.base=" + otherDir).toString()); - assertEquals( - otherDir.toString(), parseJettyBase("foo jetty.base=" + otherDir + " bar").toString()); + assertThat(parseJettyBase("jetty.base=" + otherDir).toString()).isEqualTo(otherDir.toString()); + assertThat(parseJettyBase("foo jetty.base=" + otherDir + " bar").toString()) + .isEqualTo(otherDir.toString()); } } diff --git a/resource-providers/src/test/java/io/opentelemetry/contrib/resourceproviders/ResourceComponentProviderTest.java b/resource-providers/src/test/java/io/opentelemetry/contrib/resourceproviders/ResourceComponentProviderTest.java new file mode 100644 index 000000000..91ea7b216 --- /dev/null +++ b/resource-providers/src/test/java/io/opentelemetry/contrib/resourceproviders/ResourceComponentProviderTest.java @@ -0,0 +1,24 @@ +/* + * Copyright The OpenTelemetry Authors + * 
SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.resourceproviders; + +import static org.assertj.core.api.Assertions.assertThat; + +import io.opentelemetry.common.ComponentLoader; +import io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider; +import org.junit.jupiter.api.Test; + +class ResourceComponentProviderTest { + + @Test + @SuppressWarnings("rawtypes") + void providerIsLoaded() { + Iterable providers = + ComponentLoader.forClassLoader(ResourceComponentProviderTest.class.getClassLoader()) + .load(ComponentProvider.class); + assertThat(providers).extracting(ComponentProvider::getName).contains("app_server"); + } +} diff --git a/runtime-attach/runtime-attach-core/build.gradle.kts b/runtime-attach/runtime-attach-core/build.gradle.kts index 445224d32..0e06aaf52 100644 --- a/runtime-attach/runtime-attach-core/build.gradle.kts +++ b/runtime-attach/runtime-attach-core/build.gradle.kts @@ -7,7 +7,7 @@ description = "To help in create an OpenTelemetry distro able to runtime attach otelJava.moduleName.set("io.opentelemetry.contrib.attach.core") dependencies { - implementation("net.bytebuddy:byte-buddy-agent:1.17.5") + implementation("net.bytebuddy:byte-buddy-agent:1.17.8") // Used by byte-buddy but not brought in as a transitive dependency. 
compileOnly("com.google.code.findbugs:annotations") diff --git a/runtime-attach/runtime-attach/src/test/java/io/opentelemetry/contrib/attach/AbstractAttachmentTest.java b/runtime-attach/runtime-attach/src/test/java/io/opentelemetry/contrib/attach/AbstractAttachmentTest.java index 43d89b2ef..611dde652 100644 --- a/runtime-attach/runtime-attach/src/test/java/io/opentelemetry/contrib/attach/AbstractAttachmentTest.java +++ b/runtime-attach/runtime-attach/src/test/java/io/opentelemetry/contrib/attach/AbstractAttachmentTest.java @@ -8,7 +8,7 @@ import io.opentelemetry.javaagent.shaded.io.opentelemetry.api.trace.Span; import org.junit.jupiter.api.BeforeAll; -public class AbstractAttachmentTest { +class AbstractAttachmentTest { @BeforeAll static void disableMainThreadCheck() { diff --git a/runtime-attach/runtime-attach/src/test/java/io/opentelemetry/contrib/attach/AgentDisabledByEnvironmentVariableTest.java b/runtime-attach/runtime-attach/src/test/java/io/opentelemetry/contrib/attach/AgentDisabledByEnvironmentVariableTest.java index 41571a2e4..91a67d3cd 100644 --- a/runtime-attach/runtime-attach/src/test/java/io/opentelemetry/contrib/attach/AgentDisabledByEnvironmentVariableTest.java +++ b/runtime-attach/runtime-attach/src/test/java/io/opentelemetry/contrib/attach/AgentDisabledByEnvironmentVariableTest.java @@ -10,7 +10,7 @@ import io.opentelemetry.instrumentation.annotations.WithSpan; import org.junit.jupiter.api.Test; -public class AgentDisabledByEnvironmentVariableTest extends AbstractAttachmentTest { +class AgentDisabledByEnvironmentVariableTest extends AbstractAttachmentTest { @Test void shouldNotAttachWhenAgentDisabledWithEnvVariable() { diff --git a/runtime-attach/runtime-attach/src/test/java/io/opentelemetry/contrib/attach/AgentDisabledBySystemPropertyTest.java b/runtime-attach/runtime-attach/src/test/java/io/opentelemetry/contrib/attach/AgentDisabledBySystemPropertyTest.java index a24c81296..39d941ff3 100644 --- 
a/runtime-attach/runtime-attach/src/test/java/io/opentelemetry/contrib/attach/AgentDisabledBySystemPropertyTest.java +++ b/runtime-attach/runtime-attach/src/test/java/io/opentelemetry/contrib/attach/AgentDisabledBySystemPropertyTest.java @@ -10,7 +10,7 @@ import io.opentelemetry.instrumentation.annotations.WithSpan; import org.junit.jupiter.api.Test; -public class AgentDisabledBySystemPropertyTest extends AbstractAttachmentTest { +class AgentDisabledBySystemPropertyTest extends AbstractAttachmentTest { @Test void shouldNotAttachWhenAgentDisabledWithProperty() { diff --git a/runtime-attach/runtime-attach/src/test/java/io/opentelemetry/contrib/attach/RunTimeAttachBasicTest.java b/runtime-attach/runtime-attach/src/test/java/io/opentelemetry/contrib/attach/RunTimeAttachBasicTest.java index 5d8f201ae..65eabcf02 100644 --- a/runtime-attach/runtime-attach/src/test/java/io/opentelemetry/contrib/attach/RunTimeAttachBasicTest.java +++ b/runtime-attach/runtime-attach/src/test/java/io/opentelemetry/contrib/attach/RunTimeAttachBasicTest.java @@ -10,7 +10,7 @@ import io.opentelemetry.instrumentation.annotations.WithSpan; import org.junit.jupiter.api.Test; -public class RunTimeAttachBasicTest extends AbstractAttachmentTest { +class RunTimeAttachBasicTest extends AbstractAttachmentTest { @Test void shouldAttach() { diff --git a/samplers/README.md b/samplers/README.md index 4be98779b..fcd21df10 100644 --- a/samplers/README.md +++ b/samplers/README.md @@ -8,11 +8,11 @@ The following samplers support [declarative configuration](https://opentelemetry To use: -* Add a dependency on `io.opentelemetry:opentelemetry-sdk-extension-incubator:` -* Follow the [instructions](https://github.com/open-telemetry/opentelemetry-java/blob/main/sdk-extensions/incubator/README.md#file-configuration) to configure OpenTelemetry with declarative configuration. 
+* Add a dependency on `io.opentelemetry.contrib:opentelemetry-samplers:` +* Follow the [instructions](https://github.com/open-telemetry/opentelemetry-java/blob/main/sdk-extensions/incubator/README.md#declarative-configuration) to configure OpenTelemetry with declarative configuration. * Configure the `.tracer_provider.sampler` to include the `rule_based_routing` sampler. -NOTE: Not yet available for use with the OTEL java agent, but should be in the near future. Please check back for updates. +Support is now available for the java agent, see an [example here](https://github.com/open-telemetry/opentelemetry-java-examples/blob/main/javaagent). Schema for `rule_based_routing` sampler: diff --git a/samplers/src/main/java/io/opentelemetry/contrib/sampler/LinksParentAlwaysOnSamplerProvider.java b/samplers/src/main/java/io/opentelemetry/contrib/sampler/LinksParentAlwaysOnSamplerProvider.java index 45f341897..8024ab590 100644 --- a/samplers/src/main/java/io/opentelemetry/contrib/sampler/LinksParentAlwaysOnSamplerProvider.java +++ b/samplers/src/main/java/io/opentelemetry/contrib/sampler/LinksParentAlwaysOnSamplerProvider.java @@ -9,7 +9,7 @@ import io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSamplerProvider; import io.opentelemetry.sdk.trace.samplers.Sampler; -public class LinksParentAlwaysOnSamplerProvider implements ConfigurableSamplerProvider { +public final class LinksParentAlwaysOnSamplerProvider implements ConfigurableSamplerProvider { @Override public Sampler createSampler(ConfigProperties config) { return LinksBasedSampler.create(Sampler.parentBased(Sampler.alwaysOn())); diff --git a/samplers/src/main/java/io/opentelemetry/contrib/sampler/internal/RuleBasedRoutingSamplerComponentProvider.java b/samplers/src/main/java/io/opentelemetry/contrib/sampler/internal/RuleBasedRoutingSamplerComponentProvider.java index 9e7fb4971..b81cdb546 100644 --- 
a/samplers/src/main/java/io/opentelemetry/contrib/sampler/internal/RuleBasedRoutingSamplerComponentProvider.java +++ b/samplers/src/main/java/io/opentelemetry/contrib/sampler/internal/RuleBasedRoutingSamplerComponentProvider.java @@ -49,7 +49,7 @@ public Sampler create(DeclarativeConfigProperties config) { fallbackSampler = DeclarativeConfiguration.createSampler(fallbackModel); } catch (DeclarativeConfigException e) { throw new DeclarativeConfigException( - "rule_Based_routing sampler failed to create .fallback sampler", e); + "rule_based_routing sampler failed to create .fallback sampler", e); } String spanKindString = config.getString("span_kind", "SERVER"); @@ -92,7 +92,7 @@ public Sampler create(DeclarativeConfigProperties config) { builder.drop(attributeKey, pattern); } else { throw new DeclarativeConfigException( - "rule_based_routing sampler .rules[].action is must be " + "rule_based_routing sampler .rules[].action must be " + ACTION_RECORD_AND_SAMPLE + " or " + ACTION_DROP); diff --git a/samplers/src/test/java/io/opentelemetry/contrib/sampler/RuleBasedRoutingSamplerTest.java b/samplers/src/test/java/io/opentelemetry/contrib/sampler/RuleBasedRoutingSamplerTest.java index 05a4301e6..62b1bee31 100644 --- a/samplers/src/test/java/io/opentelemetry/contrib/sampler/RuleBasedRoutingSamplerTest.java +++ b/samplers/src/test/java/io/opentelemetry/contrib/sampler/RuleBasedRoutingSamplerTest.java @@ -63,7 +63,7 @@ public void setup() { } @Test - public void testThatThrowsOnNullParameter() { + void testThatThrowsOnNullParameter() { assertThatExceptionOfType(NullPointerException.class) .isThrownBy(() -> new RuleBasedRoutingSampler(patterns, SPAN_KIND, null)); @@ -98,7 +98,7 @@ public void testThatThrowsOnNullParameter() { } @Test - public void testThatDelegatesIfNoRulesGiven() { + void testThatDelegatesIfNoRulesGiven() { RuleBasedRoutingSampler sampler = RuleBasedRoutingSampler.builder(SPAN_KIND, delegate).build(); // no http.url attribute @@ -117,7 +117,7 @@ public void 
testThatDelegatesIfNoRulesGiven() { } @Test - public void testDropOnExactMatch() { + void testDropOnExactMatch() { RuleBasedRoutingSampler sampler = addRules(RuleBasedRoutingSampler.builder(SPAN_KIND, delegate)).build(); assertThat(shouldSample(sampler, "https://example.com/healthcheck").getDecision()) @@ -125,7 +125,7 @@ public void testDropOnExactMatch() { } @Test - public void testDelegateOnDifferentKind() { + void testDelegateOnDifferentKind() { RuleBasedRoutingSampler sampler = addRules(RuleBasedRoutingSampler.builder(SpanKind.CLIENT, delegate)).build(); assertThat(shouldSample(sampler, "https://example.com/healthcheck").getDecision()) @@ -134,7 +134,7 @@ public void testDelegateOnDifferentKind() { } @Test - public void testDelegateOnNoMatch() { + void testDelegateOnNoMatch() { RuleBasedRoutingSampler sampler = addRules(RuleBasedRoutingSampler.builder(SPAN_KIND, delegate)).build(); assertThat(shouldSample(sampler, "https://example.com/customers").getDecision()) @@ -143,7 +143,7 @@ public void testDelegateOnNoMatch() { } @Test - public void testDelegateOnMalformedUrl() { + void testDelegateOnMalformedUrl() { RuleBasedRoutingSampler sampler = addRules(RuleBasedRoutingSampler.builder(SPAN_KIND, delegate)).build(); assertThat(shouldSample(sampler, "abracadabra").getDecision()) @@ -158,7 +158,7 @@ public void testDelegateOnMalformedUrl() { } @Test - public void testVerifiesAllGivenAttributes() { + void testVerifiesAllGivenAttributes() { RuleBasedRoutingSampler sampler = addRules(RuleBasedRoutingSampler.builder(SPAN_KIND, delegate)).build(); Attributes attributes = Attributes.of(URL_PATH, "/actuator/info"); diff --git a/samplers/src/test/java/internal/RuleBasedRoutingSamplerComponentProviderTest.java b/samplers/src/test/java/io/opentelemetry/contrib/sampler/internal/RuleBasedRoutingSamplerComponentProviderTest.java similarity index 96% rename from samplers/src/test/java/internal/RuleBasedRoutingSamplerComponentProviderTest.java rename to 
samplers/src/test/java/io/opentelemetry/contrib/sampler/internal/RuleBasedRoutingSamplerComponentProviderTest.java index 384707843..cc2ae4129 100644 --- a/samplers/src/test/java/internal/RuleBasedRoutingSamplerComponentProviderTest.java +++ b/samplers/src/test/java/io/opentelemetry/contrib/sampler/internal/RuleBasedRoutingSamplerComponentProviderTest.java @@ -3,7 +3,7 @@ * SPDX-License-Identifier: Apache-2.0 */ -package internal; +package io.opentelemetry.contrib.sampler.internal; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; @@ -15,7 +15,6 @@ import io.opentelemetry.api.trace.SpanKind; import io.opentelemetry.context.Context; import io.opentelemetry.contrib.sampler.RuleBasedRoutingSampler; -import io.opentelemetry.contrib.sampler.internal.RuleBasedRoutingSamplerComponentProvider; import io.opentelemetry.sdk.OpenTelemetrySdk; import io.opentelemetry.sdk.extension.incubator.fileconfig.DeclarativeConfiguration; import io.opentelemetry.sdk.trace.IdGenerator; @@ -38,7 +37,7 @@ class RuleBasedRoutingSamplerComponentProviderTest { @Test void endToEnd() { String yaml = - "file_format: 0.3\n" + "file_format: 1.0-rc.1\n" + "tracer_provider:\n" + " sampler:\n" + " parent_based:\n" @@ -173,7 +172,7 @@ static Stream createInvalidArgs() { + "rules:\n" + " - attribute: url.path\n" + " pattern: path\n", - "rule_Based_routing sampler failed to create .fallback sampler"), + "rule_based_routing sampler failed to create .fallback sampler"), Arguments.of( "fallback_sampler:\n" + " always_on:\n" @@ -218,6 +217,6 @@ static Stream createInvalidArgs() { + " - attribute: url.path\n" + " pattern: path\n" + " action: foo\n", - "rule_based_routing sampler .rules[].action is must be RECORD_AND_SAMPLE or DROP")); + "rule_based_routing sampler .rules[].action must be RECORD_AND_SAMPLE or DROP")); } } diff --git a/settings.gradle.kts b/settings.gradle.kts index ef819d82a..1df4d33ad 100644 --- a/settings.gradle.kts +++ 
b/settings.gradle.kts @@ -1,8 +1,8 @@ pluginManagement { plugins { - id("com.github.johnrengelman.shadow") version "8.1.1" + id("com.gradleup.shadow") version "9.2.2" id("io.github.gradle-nexus.publish-plugin") version "2.0.0" - id("com.gradle.develocity") version "4.0" + id("com.gradle.develocity") version "4.2.1" } } @@ -29,12 +29,17 @@ develocity { publishing.onlyIf { System.getenv("CI") != null } termsOfUseUrl.set("https://gradle.com/help/legal-terms-of-use") termsOfUseAgree.set("yes") + + buildScanPublished { + File("build-scan.txt").printWriter().use { writer -> + writer.println(buildScanUri) + } + } } } rootProject.name = "opentelemetry-java-contrib" -include(":all") include(":aws-resources") include(":aws-xray") include(":aws-xray-propagator") @@ -45,7 +50,7 @@ include(":cloudfoundry-resources") include(":consistent-sampling") include(":dependencyManagement") include(":disk-buffering") -include(":example") +include(":ibm-mq-metrics") include(":jfr-events") include(":jfr-connection") include(":jmx-metrics") diff --git a/span-stacktrace/README.md b/span-stacktrace/README.md index 92eef3bbe..ecd31ac85 100644 --- a/span-stacktrace/README.md +++ b/span-stacktrace/README.md @@ -1,4 +1,3 @@ - # Span stacktrace capture This module provides a `SpanProcessor` that captures the [`code.stacktrace`](https://opentelemetry.io/docs/specs/semconv/attributes-registry/code/). @@ -25,6 +24,21 @@ SDK when included in the application runtime dependencies. - value is the class name of a class implementing `java.util.function.Predicate` - filter class must be publicly accessible and provide a no-arg constructor +### Usage with declarative configuration + +You can enable the stacktrace span processor using declarative YAML configuration with the OpenTelemetry SDK. 
For example: + +```yaml +file_format: 1.0-rc.1 +tracer_provider: + processors: + - experimental_stacktrace: + min_duration: 10 # minimal duration in ms, default is 5, MUST be an integer + filter: my.class.Name # optional, default is to include all spans +``` + +This configuration will register the StackTraceSpanProcessor for all spans. + ## Component owners - [Jack Shirazi](https://github.com/jackshirazi), Elastic diff --git a/span-stacktrace/build.gradle.kts b/span-stacktrace/build.gradle.kts index b8316af62..57f299754 100644 --- a/span-stacktrace/build.gradle.kts +++ b/span-stacktrace/build.gradle.kts @@ -15,10 +15,16 @@ dependencies { compileOnly("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure") compileOnly("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure-spi") + compileOnly("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure") + compileOnly("io.opentelemetry:opentelemetry-sdk-extension-incubator") + compileOnly("io.opentelemetry.instrumentation:opentelemetry-declarative-config-bridge") testImplementation("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure") testImplementation("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure-spi") + testImplementation("io.opentelemetry:opentelemetry-sdk-extension-incubator") + testImplementation("io.opentelemetry.instrumentation:opentelemetry-declarative-config-bridge") - testImplementation("io.opentelemetry.semconv:opentelemetry-semconv-incubating") + compileOnly("io.opentelemetry.semconv:opentelemetry-semconv") + testImplementation("io.opentelemetry.semconv:opentelemetry-semconv") testAnnotationProcessor("com.google.auto.service:auto-service") testCompileOnly("com.google.auto.service:auto-service-annotations") diff --git a/span-stacktrace/src/main/java/io/opentelemetry/contrib/stacktrace/StackTraceAutoConfig.java b/span-stacktrace/src/main/java/io/opentelemetry/contrib/stacktrace/StackTraceAutoConfig.java index 2315d2a10..4417c0f86 100644 --- 
a/span-stacktrace/src/main/java/io/opentelemetry/contrib/stacktrace/StackTraceAutoConfig.java +++ b/span-stacktrace/src/main/java/io/opentelemetry/contrib/stacktrace/StackTraceAutoConfig.java @@ -5,6 +5,9 @@ package io.opentelemetry.contrib.stacktrace; +import static java.util.logging.Level.FINE; +import static java.util.logging.Level.SEVERE; + import com.google.auto.service.AutoService; import io.opentelemetry.sdk.autoconfigure.spi.AutoConfigurationCustomizer; import io.opentelemetry.sdk.autoconfigure.spi.AutoConfigurationCustomizerProvider; @@ -23,25 +26,26 @@ public class StackTraceAutoConfig implements AutoConfigurationCustomizerProvider private static final Logger log = Logger.getLogger(StackTraceAutoConfig.class.getName()); - private static final String CONFIG_MIN_DURATION = - "otel.java.experimental.span-stacktrace.min.duration"; + static final String PREFIX = "otel.java.experimental.span-stacktrace."; + static final String CONFIG_MIN_DURATION = PREFIX + "min.duration"; private static final Duration CONFIG_MIN_DURATION_DEFAULT = Duration.ofMillis(5); - - private static final String CONFIG_FILTER = "otel.java.experimental.span-stacktrace.filter"; + private static final String CONFIG_FILTER = PREFIX + "filter"; @Override public void customize(AutoConfigurationCustomizer config) { config.addTracerProviderCustomizer( (providerBuilder, properties) -> { - long minDuration = getMinDuration(properties); - if (minDuration >= 0) { - Predicate filter = getFilterPredicate(properties); - providerBuilder.addSpanProcessor(new StackTraceSpanProcessor(minDuration, filter)); + if (getMinDuration(properties) >= 0) { + providerBuilder.addSpanProcessor(create(properties)); } return providerBuilder; }); } + static StackTraceSpanProcessor create(ConfigProperties properties) { + return new StackTraceSpanProcessor(getMinDuration(properties), getFilterPredicate(properties)); + } + // package-private for testing static long getMinDuration(ConfigProperties properties) { long 
minDuration = @@ -50,7 +54,7 @@ static long getMinDuration(ConfigProperties properties) { log.fine("Stack traces capture is disabled"); } else { log.log( - Level.FINE, + FINE, "Stack traces will be added to spans with a minimum duration of {0} nanos", minDuration); } @@ -70,7 +74,7 @@ static Predicate getFilterPredicate(ConfigProperties properties) { if (filter == null) { // if value is set, lack of filtering is likely an error and must be reported - Level disabledLogLevel = filterClass != null ? Level.SEVERE : Level.FINE; + Level disabledLogLevel = filterClass != null ? SEVERE : FINE; log.log(disabledLogLevel, "Span stacktrace filtering disabled"); return span -> true; } else { diff --git a/span-stacktrace/src/main/java/io/opentelemetry/contrib/stacktrace/StackTraceComponentProvider.java b/span-stacktrace/src/main/java/io/opentelemetry/contrib/stacktrace/StackTraceComponentProvider.java new file mode 100644 index 000000000..338ebd960 --- /dev/null +++ b/span-stacktrace/src/main/java/io/opentelemetry/contrib/stacktrace/StackTraceComponentProvider.java @@ -0,0 +1,35 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.stacktrace; + +import com.google.auto.service.AutoService; +import io.opentelemetry.api.incubator.config.DeclarativeConfigProperties; +import io.opentelemetry.instrumentation.config.bridge.DeclarativeConfigPropertiesBridgeBuilder; +import io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider; +import io.opentelemetry.sdk.trace.SpanProcessor; + +@SuppressWarnings("rawtypes") +@AutoService(ComponentProvider.class) +public class StackTraceComponentProvider implements ComponentProvider { + @Override + public String getName() { + return "experimental_stacktrace"; + } + + @Override + public SpanProcessor create(DeclarativeConfigProperties config) { + return StackTraceAutoConfig.create( + new DeclarativeConfigPropertiesBridgeBuilder() + 
.addMapping(StackTraceAutoConfig.CONFIG_MIN_DURATION, "min_duration") + .addMapping(StackTraceAutoConfig.PREFIX, "") + .build(config)); + } + + @Override + public Class getType() { + return SpanProcessor.class; + } +} diff --git a/span-stacktrace/src/main/java/io/opentelemetry/contrib/stacktrace/StackTraceSpanProcessor.java b/span-stacktrace/src/main/java/io/opentelemetry/contrib/stacktrace/StackTraceSpanProcessor.java index 62fb9ff34..441e07446 100644 --- a/span-stacktrace/src/main/java/io/opentelemetry/contrib/stacktrace/StackTraceSpanProcessor.java +++ b/span-stacktrace/src/main/java/io/opentelemetry/contrib/stacktrace/StackTraceSpanProcessor.java @@ -5,21 +5,17 @@ package io.opentelemetry.contrib.stacktrace; -import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.context.Context; import io.opentelemetry.sdk.trace.ReadWriteSpan; import io.opentelemetry.sdk.trace.ReadableSpan; import io.opentelemetry.sdk.trace.internal.ExtendedSpanProcessor; +import io.opentelemetry.semconv.CodeAttributes; import java.io.PrintWriter; import java.io.StringWriter; import java.util.function.Predicate; public class StackTraceSpanProcessor implements ExtendedSpanProcessor { - // inlined incubating attribute to prevent direct dependency on incubating semconv - private static final AttributeKey SPAN_STACKTRACE = - AttributeKey.stringKey("code.stacktrace"); - private final long minSpanDurationNanos; private final Predicate filterPredicate; @@ -56,14 +52,14 @@ public void onEnding(ReadWriteSpan span) { if (span.getLatencyNanos() < minSpanDurationNanos) { return; } - if (span.getAttribute(SPAN_STACKTRACE) != null) { + if (span.getAttribute(CodeAttributes.CODE_STACKTRACE) != null) { // Span already has a stacktrace, do not override return; } if (!filterPredicate.test(span)) { return; } - span.setAttribute(SPAN_STACKTRACE, generateSpanEndStacktrace()); + span.setAttribute(CodeAttributes.CODE_STACKTRACE, generateSpanEndStacktrace()); } @Override @@ -98,4 +94,14 @@ private 
static String removeInternalFrames(String stackTrace) { } return stackTrace.substring(nextNewLine + 1); } + + @Override + public String toString() { + return "StackTraceSpanProcessor{" + + "minSpanDurationNanos=" + + minSpanDurationNanos + + ", filterPredicate=" + + filterPredicate + + '}'; + } } diff --git a/span-stacktrace/src/test/java/io/opentelemetry/contrib/stacktrace/StackTraceAutoConfigTest.java b/span-stacktrace/src/test/java/io/opentelemetry/contrib/stacktrace/StackTraceAutoConfigTest.java index 4b8f999f4..ca1517480 100644 --- a/span-stacktrace/src/test/java/io/opentelemetry/contrib/stacktrace/StackTraceAutoConfigTest.java +++ b/span-stacktrace/src/test/java/io/opentelemetry/contrib/stacktrace/StackTraceAutoConfigTest.java @@ -15,7 +15,7 @@ import java.util.function.Predicate; import org.junit.jupiter.api.Test; -public class StackTraceAutoConfigTest { +class StackTraceAutoConfigTest { @Test void defaultConfig() { diff --git a/span-stacktrace/src/test/java/io/opentelemetry/contrib/stacktrace/StackTraceComponentProviderTest.java b/span-stacktrace/src/test/java/io/opentelemetry/contrib/stacktrace/StackTraceComponentProviderTest.java new file mode 100644 index 000000000..ee55d726c --- /dev/null +++ b/span-stacktrace/src/test/java/io/opentelemetry/contrib/stacktrace/StackTraceComponentProviderTest.java @@ -0,0 +1,41 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.stacktrace; + +import static org.assertj.core.api.Assertions.assertThat; + +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.extension.incubator.fileconfig.DeclarativeConfiguration; +import java.io.ByteArrayInputStream; +import java.nio.charset.StandardCharsets; +import java.util.Locale; +import java.util.concurrent.TimeUnit; +import org.junit.jupiter.api.Test; + +class StackTraceComponentProviderTest { + @Test + void endToEnd() { + String yaml = + "file_format: 1.0-rc.1\n" + + 
"tracer_provider:\n" + + " processors:\n" + + " - experimental_stacktrace: \n" + + " min_duration: 100\n" + + " filter: io.opentelemetry.contrib.stacktrace.StackTraceSpanProcessorTest$YesPredicate\n"; + + OpenTelemetrySdk openTelemetrySdk = + DeclarativeConfiguration.parseAndCreate( + new ByteArrayInputStream(yaml.getBytes(StandardCharsets.UTF_8))); + + assertThat(openTelemetrySdk.getSdkTracerProvider().toString()) + .contains( + String.format( + Locale.ROOT, + "StackTraceSpanProcessor{minSpanDurationNanos=%d, " + + "filterPredicate=io.opentelemetry.contrib.stacktrace.StackTraceSpanProcessorTest$YesPredicate", + TimeUnit.MILLISECONDS.toNanos(100))); + } +} diff --git a/span-stacktrace/src/test/java/io/opentelemetry/contrib/stacktrace/StackTraceSpanProcessorTest.java b/span-stacktrace/src/test/java/io/opentelemetry/contrib/stacktrace/StackTraceSpanProcessorTest.java index 3d6d4686d..e5be32019 100644 --- a/span-stacktrace/src/test/java/io/opentelemetry/contrib/stacktrace/StackTraceSpanProcessorTest.java +++ b/span-stacktrace/src/test/java/io/opentelemetry/contrib/stacktrace/StackTraceSpanProcessorTest.java @@ -19,7 +19,7 @@ import io.opentelemetry.sdk.trace.ReadableSpan; import io.opentelemetry.sdk.trace.data.SpanData; import io.opentelemetry.sdk.trace.export.SpanExporter; -import io.opentelemetry.semconv.incubating.CodeIncubatingAttributes; +import io.opentelemetry.semconv.CodeAttributes; import java.time.Duration; import java.time.Instant; import java.util.HashMap; @@ -89,7 +89,7 @@ void spanWithExistingStackTrace() { YesPredicate.class, "1ms", Duration.ofMillis(1).toNanos(), - sb -> sb.setAttribute(CodeIncubatingAttributes.CODE_STACKTRACE, "hello"), + sb -> sb.setAttribute(CodeAttributes.CODE_STACKTRACE, "hello"), stacktrace -> assertThat(stacktrace).isEqualTo("hello")); } @@ -169,8 +169,7 @@ private static void checkSpan( List finishedSpans = spansExporter.getFinishedSpanItems(); assertThat(finishedSpans).hasSize(1); - String stackTrace = - 
finishedSpans.get(0).getAttributes().get(CodeIncubatingAttributes.CODE_STACKTRACE); + String stackTrace = finishedSpans.get(0).getAttributes().get(CodeAttributes.CODE_STACKTRACE); stackTraceCheck.accept(stackTrace); } diff --git a/version.gradle.kts b/version.gradle.kts index 0ae7508cb..54918ebc5 100644 --- a/version.gradle.kts +++ b/version.gradle.kts @@ -1,5 +1,5 @@ -val stableVersion = "1.47.0-SNAPSHOT" -val alphaVersion = "1.47.0-alpha-SNAPSHOT" +val stableVersion = "1.51.0-SNAPSHOT" +val alphaVersion = "1.51.0-alpha-SNAPSHOT" allprojects { if (findProperty("otel.stable") != "true") {