From 48ebd53e2181e148ea3b527b6d5e0885a7022f41 Mon Sep 17 00:00:00 2001 From: RafaelGSS Date: Tue, 21 Jan 2025 13:21:43 -0300 Subject: [PATCH 001/158] Working on v23.6.2 PR-URL: https://github.com/nodejs-private/node-private/pull/654 Signed-off-by: RafaelGSS --- src/node_version.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/node_version.h b/src/node_version.h index b1712e3ac1c382..7b699b6f6ed735 100644 --- a/src/node_version.h +++ b/src/node_version.h @@ -24,12 +24,12 @@ #define NODE_MAJOR_VERSION 23 #define NODE_MINOR_VERSION 6 -#define NODE_PATCH_VERSION 1 +#define NODE_PATCH_VERSION 2 #define NODE_VERSION_IS_LTS 0 #define NODE_VERSION_LTS_CODENAME "" -#define NODE_VERSION_IS_RELEASE 1 +#define NODE_VERSION_IS_RELEASE 0 #ifndef NODE_STRINGIFY #define NODE_STRINGIFY(n) NODE_STRINGIFY_HELPER(n) From 3aa864904f8b920d5eb72c848912108c05a21fd2 Mon Sep 17 00:00:00 2001 From: Colin Ihrig Date: Fri, 3 Jan 2025 13:24:51 -0500 Subject: [PATCH 002/158] test_runner: finish marking snapshot testing as stable Snapshot testing was marked stable in #55897. These were overlooked at the time. Refs: https://github.com/nodejs/node/pull/55897 PR-URL: https://github.com/nodejs/node/pull/56425 Reviewed-By: Yagiz Nizipli Reviewed-By: Chemi Atlow Reviewed-By: Jacob Smith Reviewed-By: Pietro Marchini Reviewed-By: Moshe Atlow --- doc/api/test.md | 16 +++++++--------- lib/internal/test_runner/snapshot.js | 6 +----- 2 files changed, 8 insertions(+), 14 deletions(-) diff --git a/doc/api/test.md b/doc/api/test.md index faae49cc92dd8a..4aeec3b4593eb8 100644 --- a/doc/api/test.md +++ b/doc/api/test.md @@ -932,7 +932,13 @@ test('runs timers as setTime passes ticks', (context) => { ## Snapshot testing -> Stability: 1.0 - Early development + Snapshot tests allow arbitrary values to be serialized into string values and compared against a set of known good values. The known good values are known as @@ -1748,8 +1754,6 @@ describe('tests', async () => { added: v22.3.0 --> -> Stability: 1.0 - Early development - An object whose methods are used to configure default snapshot settings in the current process. It is possible to apply the same configuration to all files by placing common configuration code in a module preloaded with `--require` or @@ -1761,8 +1765,6 @@ placing common configuration code in a module preloaded with `--require` or added: v22.3.0 --> -> Stability: 1.0 - Early development - * `serializers` {Array} An array of synchronous functions used as the default serializers for snapshot tests. @@ -1778,8 +1780,6 @@ more robust serialization mechanism is required, this function should be used. added: v22.3.0 --> -> Stability: 1.0 - Early development - * `fn` {Function} A function used to compute the location of the snapshot file. The function receives the path of the test file as its only argument. If the test is not associated with a file (for example in the REPL), the input is @@ -3259,8 +3259,6 @@ test('test', (t) => { added: v22.3.0 --> -> Stability: 1.0 - Early development - * `value` {any} A value to serialize to a string. If Node.js was started with the [`--test-update-snapshots`][] flag, the serialized value is written to the snapshot file. 
Otherwise, the serialized value is compared to the diff --git a/lib/internal/test_runner/snapshot.js b/lib/internal/test_runner/snapshot.js index 7e41a0bf76f0cd..e6fcd71552c939 100644 --- a/lib/internal/test_runner/snapshot.js +++ b/lib/internal/test_runner/snapshot.js @@ -15,7 +15,7 @@ const { ERR_INVALID_STATE, }, } = require('internal/errors'); -const { emitExperimentalWarning, kEmptyObject } = require('internal/util'); +const { kEmptyObject } = require('internal/util'); let debug = require('internal/util/debuglog').debuglog('test_runner', (fn) => { debug = fn; }); @@ -28,7 +28,6 @@ const { strictEqual } = require('assert'); const { mkdirSync, readFileSync, writeFileSync } = require('fs'); const { dirname } = require('path'); const { createContext, runInContext } = require('vm'); -const kExperimentalWarning = 'Snapshot testing'; const kMissingSnapshotTip = 'Missing snapshots can be generated by rerunning ' + 'the command with the --test-update-snapshots flag.'; const defaultSerializers = [ @@ -47,13 +46,11 @@ let resolveSnapshotPathFn = defaultResolveSnapshotPath; let serializerFns = defaultSerializers; function setResolveSnapshotPath(fn) { - emitExperimentalWarning(kExperimentalWarning); validateFunction(fn, 'fn'); resolveSnapshotPathFn = fn; } function setDefaultSnapshotSerializers(serializers) { - emitExperimentalWarning(kExperimentalWarning); validateFunctionArray(serializers, 'serializers'); serializerFns = ArrayPrototypeSlice(serializers); } @@ -207,7 +204,6 @@ class SnapshotManager { const manager = this; return function snapshotAssertion(actual, options = kEmptyObject) { - emitExperimentalWarning(kExperimentalWarning); validateObject(options, 'options'); const { serializers = serializerFns, From c249c9715acc58742cf9dcf58846f02236b09dea Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 3 Jan 2025 18:37:45 +0000 Subject: [PATCH 003/158] tools: bump the eslint group in /tools/eslint with 4 updates Bumps the eslint group in /tools/eslint with 4 updates: [@stylistic/eslint-plugin-js](https://github.com/eslint-stylistic/eslint-stylistic/tree/HEAD/packages/eslint-plugin-js), [eslint](https://github.com/eslint/eslint), [eslint-plugin-jsdoc](https://github.com/gajus/eslint-plugin-jsdoc) and [globals](https://github.com/sindresorhus/globals). 
Updates `@stylistic/eslint-plugin-js` from 2.11.0 to 2.12.1 - [Release notes](https://github.com/eslint-stylistic/eslint-stylistic/releases) - [Changelog](https://github.com/eslint-stylistic/eslint-stylistic/blob/main/CHANGELOG.md) - [Commits](https://github.com/eslint-stylistic/eslint-stylistic/commits/v2.12.1/packages/eslint-plugin-js) Updates `eslint` from 9.16.0 to 9.17.0 - [Release notes](https://github.com/eslint/eslint/releases) - [Changelog](https://github.com/eslint/eslint/blob/main/CHANGELOG.md) - [Commits](https://github.com/eslint/eslint/compare/v9.16.0...v9.17.0) Updates `eslint-plugin-jsdoc` from 50.6.0 to 50.6.1 - [Release notes](https://github.com/gajus/eslint-plugin-jsdoc/releases) - [Changelog](https://github.com/gajus/eslint-plugin-jsdoc/blob/main/.releaserc) - [Commits](https://github.com/gajus/eslint-plugin-jsdoc/compare/v50.6.0...v50.6.1) Updates `globals` from 15.12.0 to 15.14.0 - [Release notes](https://github.com/sindresorhus/globals/releases) - [Commits](https://github.com/sindresorhus/globals/compare/v15.12.0...v15.14.0) --- updated-dependencies: - dependency-name: "@stylistic/eslint-plugin-js" dependency-type: direct:production update-type: version-update:semver-minor dependency-group: eslint - dependency-name: eslint dependency-type: direct:production update-type: version-update:semver-minor dependency-group: eslint - dependency-name: eslint-plugin-jsdoc dependency-type: direct:production update-type: version-update:semver-patch dependency-group: eslint - dependency-name: globals dependency-type: direct:production update-type: version-update:semver-minor dependency-group: eslint ... 
Signed-off-by: dependabot[bot] PR-URL: https://github.com/nodejs/node/pull/56426 Reviewed-By: Antoine du Hamel Reviewed-By: Luigi Pinca Reviewed-By: Marco Ippolito --- tools/eslint/package-lock.json | 55 ++++++++++++++++------------------ tools/eslint/package.json | 8 ++--- 2 files changed, 29 insertions(+), 34 deletions(-) diff --git a/tools/eslint/package-lock.json b/tools/eslint/package-lock.json index ddae8841e1ea39..7d57c1b57129b2 100644 --- a/tools/eslint/package-lock.json +++ b/tools/eslint/package-lock.json @@ -11,12 +11,12 @@ "@babel/core": "^7.26.0", "@babel/eslint-parser": "^7.25.9", "@babel/plugin-syntax-import-attributes": "^7.26.0", - "@stylistic/eslint-plugin-js": "^2.11.0", - "eslint": "^9.16.0", + "@stylistic/eslint-plugin-js": "^2.12.1", + "eslint": "^9.17.0", "eslint-formatter-tap": "^8.40.0", - "eslint-plugin-jsdoc": "^50.6.0", + "eslint-plugin-jsdoc": "^50.6.1", "eslint-plugin-markdown": "^5.1.0", - "globals": "^15.12.0" + "globals": "^15.14.0" } }, "node_modules/@ampproject/remapping": { @@ -382,9 +382,9 @@ } }, "node_modules/@eslint/js": { - "version": "9.16.0", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.16.0.tgz", - "integrity": "sha512-tw2HxzQkrbeuvyj1tG2Yqq+0H9wGoI2IMk4EOsQeX+vmd75FtJAzf+gTA69WF+baUKRYQ3x2kbLE08js5OsTVg==", + "version": "9.17.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.17.0.tgz", + "integrity": "sha512-Sxc4hqcs1kTu0iID3kcZDW3JHq2a77HO9P8CP6YEA/FpH3Ll8UXE2r/86Rz9YJLKme39S9vU5OWNjC6Xl0Cr3w==", "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } @@ -535,9 +535,9 @@ } }, "node_modules/@stylistic/eslint-plugin-js": { - "version": "2.11.0", - "resolved": "https://registry.npmjs.org/@stylistic/eslint-plugin-js/-/eslint-plugin-js-2.11.0.tgz", - "integrity": "sha512-btchD0P3iij6cIk5RR5QMdEhtCCV0+L6cNheGhGCd//jaHILZMTi/EOqgEDAf1s4ZoViyExoToM+S2Iwa3U9DA==", + "version": "2.12.1", + "resolved": "https://registry.npmjs.org/@stylistic/eslint-plugin-js/-/eslint-plugin-js-2.12.1.tgz", + "integrity": "sha512-5ybogtEgWIGCR6dMnaabztbWyVdAPDsf/5XOk6jBonWug875Q9/a6gm9QxnU3rhdyDEnckWKX7dduwYJMOWrVA==", "dependencies": { "eslint-visitor-keys": "^4.2.0", "espree": "^10.3.0" @@ -757,9 +757,9 @@ "license": "MIT" }, "node_modules/cross-spawn": { - "version": "7.0.5", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.5.tgz", - "integrity": "sha512-ZVJrKKYunU38/76t0RMOulHOnUcbU9GbpWKAOZ0mhjr7CX6FVrH+4FrAapSOekrgFQ3f/8gwMEuIft0aKq6Hug==", + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", @@ -812,16 +812,16 @@ } }, "node_modules/eslint": { - "version": "9.16.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.16.0.tgz", - "integrity": "sha512-whp8mSQI4C8VXd+fLgSM0lh3UlmcFtVwUQjyKCFfsp+2ItAIYhlq/hqGahGqHE6cv9unM41VlqKk2VtKYR2TaA==", + "version": "9.17.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.17.0.tgz", + "integrity": "sha512-evtlNcpJg+cZLcnVKwsai8fExnqjGPicK7gnUtlNuzu+Fv9bI0aLpND5T44VLQtoMEnI57LoXO9XAkIXwohKrA==", "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", "@eslint-community/regexpp": "^4.12.1", "@eslint/config-array": "^0.19.0", "@eslint/core": "^0.9.0", "@eslint/eslintrc": "^3.2.0", - "@eslint/js": "9.16.0", + "@eslint/js": "9.17.0", "@eslint/plugin-kit": "^0.2.3", "@humanfs/node": "^0.16.6", 
"@humanwhocodes/module-importer": "^1.0.1", @@ -830,7 +830,7 @@ "@types/json-schema": "^7.0.15", "ajv": "^6.12.4", "chalk": "^4.0.0", - "cross-spawn": "^7.0.5", + "cross-spawn": "^7.0.6", "debug": "^4.3.2", "escape-string-regexp": "^4.0.0", "eslint-scope": "^8.2.0", @@ -882,9 +882,9 @@ } }, "node_modules/eslint-plugin-jsdoc": { - "version": "50.6.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-jsdoc/-/eslint-plugin-jsdoc-50.6.0.tgz", - "integrity": "sha512-tCNp4fR79Le3dYTPB0dKEv7yFyvGkUCa+Z3yuTrrNGGOxBlXo9Pn0PEgroOZikUQOGjxoGMVKNjrOHcYEdfszg==", + "version": "50.6.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-jsdoc/-/eslint-plugin-jsdoc-50.6.1.tgz", + "integrity": "sha512-UWyaYi6iURdSfdVVqvfOs2vdCVz0J40O/z/HTsv2sFjdjmdlUI/qlKLOTmwbPQ2tAfQnE5F9vqx+B+poF71DBQ==", "dependencies": { "@es-joy/jsdoccomment": "~0.49.0", "are-docs-informative": "^0.0.2", @@ -1254,9 +1254,9 @@ } }, "node_modules/globals": { - "version": "15.12.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-15.12.0.tgz", - "integrity": "sha512-1+gLErljJFhbOVyaetcwJiJ4+eLe45S2E7P5UiZ9xGfeq3ATQf5DOv9G7MH3gGbKQLkzmNh2DxfZwLdw+j6oTQ==", + "version": "15.14.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-15.14.0.tgz", + "integrity": "sha512-OkToC372DtlQeje9/zHIo5CT8lRP/FUgEOKBEhU4e0abL7J7CD24fD9ohiLN5hagG/kWCYj4K5oaxxtj2Z0Dig==", "engines": { "node": ">=18" }, @@ -1364,8 +1364,7 @@ "node_modules/isexe": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", - "license": "ISC" + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" }, "node_modules/js-tokens": { "version": "4.0.0", @@ -1660,7 +1659,6 @@ "version": "3.1.1", "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", - "license": "MIT", "engines": { "node": ">=8" } @@ -1708,7 +1706,6 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "license": "MIT", "dependencies": { "shebang-regex": "^3.0.0" }, @@ -1720,7 +1717,6 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "license": "MIT", "engines": { "node": ">=8" } @@ -1852,7 +1848,6 @@ "version": "2.0.2", "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "license": "ISC", "dependencies": { "isexe": "^2.0.0" }, diff --git a/tools/eslint/package.json b/tools/eslint/package.json index 59b2e661aa96f1..68bedee0cb10f9 100644 --- a/tools/eslint/package.json +++ b/tools/eslint/package.json @@ -6,11 +6,11 @@ "@babel/core": "^7.26.0", "@babel/eslint-parser": "^7.25.9", "@babel/plugin-syntax-import-attributes": "^7.26.0", - "@stylistic/eslint-plugin-js": "^2.11.0", - "eslint": "^9.16.0", + "@stylistic/eslint-plugin-js": "^2.12.1", + "eslint": "^9.17.0", "eslint-formatter-tap": "^8.40.0", - "eslint-plugin-jsdoc": "^50.6.0", + "eslint-plugin-jsdoc": "^50.6.1", "eslint-plugin-markdown": "^5.1.0", - 
"globals": "^15.12.0" + "globals": "^15.14.0" } } From 7a0dd2d04f724abd561a7acb61f30e8f39c84ad2 Mon Sep 17 00:00:00 2001 From: Antoine du Hamel Date: Fri, 3 Jan 2025 20:25:17 +0100 Subject: [PATCH 004/158] tools: use a configurable value for number of open dependabot PRs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This way, we can disable all dependabot PRs from private forks. PR-URL: https://github.com/nodejs/node/pull/56427 Reviewed-By: Chemi Atlow Reviewed-By: Michaël Zasso Reviewed-By: Luigi Pinca --- .github/dependabot.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 33aaa6304fee00..b9770e23a2e353 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -8,7 +8,7 @@ updates: interval: monthly commit-message: prefix: meta - open-pull-requests-limit: 10 + open-pull-requests-limit: ${{secrets.OPEN_PR_LIMIT}} - package-ecosystem: npm directory: /tools/eslint @@ -16,7 +16,7 @@ updates: interval: monthly commit-message: prefix: tools - open-pull-requests-limit: 10 + open-pull-requests-limit: ${{secrets.OPEN_PR_LIMIT}} groups: eslint: applies-to: version-updates @@ -29,7 +29,7 @@ updates: interval: monthly commit-message: prefix: tools - open-pull-requests-limit: 10 + open-pull-requests-limit: ${{secrets.OPEN_PR_LIMIT}} groups: lint-md: applies-to: version-updates From aea088f79eb1ee5765410741310db8f0f0123aec Mon Sep 17 00:00:00 2001 From: Rich Trott Date: Fri, 3 Jan 2025 15:12:08 -0800 Subject: [PATCH 005/158] tools: disable unneeded rule ignoring in Python linting Removing PLC1901 and RUF100 from the list of Python lint rules to ignore does not result in any errors. Keeping the ignore list as short as possible seems like a good idea, so this change removes them from the ignore list. PR-URL: https://github.com/nodejs/node/pull/56429 Reviewed-By: Yagiz Nizipli Reviewed-By: Christian Clauss Reviewed-By: Luigi Pinca Reviewed-By: Chengzhong Wu --- pyproject.toml | 2 -- 1 file changed, 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 8e97e3b4446293..03f53aa6bed6bf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,9 +33,7 @@ ignore = [ "E401", "E402", "E7", - "PLC1901", "RUF005", - "RUF100", ] [tool.ruff.lint.mccabe] From 332ce548cbd78cf852deb7fec1b6d33942c6f446 Mon Sep 17 00:00:00 2001 From: Colin Ihrig Date: Sat, 4 Jan 2025 12:18:49 -0500 Subject: [PATCH 006/158] test: update test-child-process-windows-hide to use node:test This commit updates test/parallel/test-child-process-windows-hide.js to use node:test. This allows the test to use the built in mocking functionality instead of managing spies manually. It also prevents multiple child processes from being spawned in parallel, which can be problematic in the CI. 
PR-URL: https://github.com/nodejs/node/pull/56437 Reviewed-By: Yagiz Nizipli Reviewed-By: James M Snell --- .../test-child-process-windows-hide.js | 47 +++++++++---------- 1 file changed, 23 insertions(+), 24 deletions(-) diff --git a/test/parallel/test-child-process-windows-hide.js b/test/parallel/test-child-process-windows-hide.js index ef4a8be8784ebc..c218c901a7f2ea 100644 --- a/test/parallel/test-child-process-windows-hide.js +++ b/test/parallel/test-child-process-windows-hide.js @@ -3,49 +3,48 @@ const common = require('../common'); const assert = require('assert'); const cp = require('child_process'); +const { test } = require('node:test'); const internalCp = require('internal/child_process'); const cmd = process.execPath; const args = ['-p', '42']; const options = { windowsHide: true }; -// Since windowsHide isn't really observable, monkey patch spawn() and -// spawnSync() to verify that the flag is being passed through correctly. -const originalSpawn = internalCp.ChildProcess.prototype.spawn; -const originalSpawnSync = internalCp.spawnSync; +// Since windowsHide isn't really observable, this test relies on monkey +// patching spawn() and spawnSync() to verify that the flag is being passed +// through correctly. -internalCp.ChildProcess.prototype.spawn = common.mustCall(function(options) { - assert.strictEqual(options.windowsHide, true); - return originalSpawn.apply(this, arguments); -}, 2); - -internalCp.spawnSync = common.mustCall(function(options) { - assert.strictEqual(options.windowsHide, true); - return originalSpawnSync.apply(this, arguments); -}); - -{ +test('spawnSync() passes windowsHide correctly', (t) => { + const spy = t.mock.method(internalCp, 'spawnSync'); const child = cp.spawnSync(cmd, args, options); assert.strictEqual(child.status, 0); assert.strictEqual(child.signal, null); assert.strictEqual(child.stdout.toString().trim(), '42'); assert.strictEqual(child.stderr.toString().trim(), ''); -} + assert.strictEqual(spy.mock.calls.length, 1); + assert.strictEqual(spy.mock.calls[0].arguments[0].windowsHide, true); +}); -{ +test('spawn() passes windowsHide correctly', (t, done) => { + const spy = t.mock.method(internalCp.ChildProcess.prototype, 'spawn'); const child = cp.spawn(cmd, args, options); child.on('exit', common.mustCall((code, signal) => { assert.strictEqual(code, 0); assert.strictEqual(signal, null); + assert.strictEqual(spy.mock.calls.length, 1); + assert.strictEqual(spy.mock.calls[0].arguments[0].windowsHide, true); + done(); })); -} +}); -{ - const callback = common.mustSucceed((stdout, stderr) => { +test('execFile() passes windowsHide correctly', (t, done) => { + const spy = t.mock.method(internalCp.ChildProcess.prototype, 'spawn'); + cp.execFile(cmd, args, options, common.mustSucceed((stdout, stderr) => { assert.strictEqual(stdout.trim(), '42'); assert.strictEqual(stderr.trim(), ''); - }); - - cp.execFile(cmd, args, options, callback); -} + assert.strictEqual(spy.mock.calls.length, 1); + assert.strictEqual(spy.mock.calls[0].arguments[0].windowsHide, true); + done(); + })); +}); From 3143566045b4f0a4a3d2d154cef4053d10cb2736 Mon Sep 17 00:00:00 2001 From: Colin Ihrig Date: Sat, 4 Jan 2025 13:30:04 -0500 Subject: [PATCH 007/158] test_runner: add assert.register() API This commit adds a top level assert.register() API to the test runner. This function allows users to define their own custom assertion functions on the TestContext. 
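A rough usage sketch of the API described by the documentation added below; the assertion name and its body are illustrative, assuming the registered function receives the call-site arguments and reports failure by throwing:

```js
const { test, assert } = require('node:test');

// Hypothetical custom assertion, registered once per process.
assert.register('isOdd', (value) => {
  if (typeof value !== 'number' || value % 2 !== 1) {
    throw new Error(`Expected ${value} to be an odd number`);
  }
});

test('registered assertions show up on t.assert', (t) => {
  t.assert.isOdd(5);
});
```

As the added documentation notes, the registration can be applied to every test file in a run by placing it in a module preloaded with `--require` or `--import`.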
Fixes: https://github.com/nodejs/node/issues/52033 PR-URL: https://github.com/nodejs/node/pull/56434 Reviewed-By: Jacob Smith Reviewed-By: Matteo Collina Reviewed-By: Pietro Marchini --- doc/api/test.md | 23 +++++++ lib/internal/test_runner/assert.js | 50 +++++++++++++++ lib/internal/test_runner/test.js | 52 ++++++--------- lib/test.js | 12 ++++ .../parallel/test-runner-custom-assertions.js | 63 +++++++++++++++++++ 5 files changed, 166 insertions(+), 34 deletions(-) create mode 100644 lib/internal/test_runner/assert.js create mode 100644 test/parallel/test-runner-custom-assertions.js diff --git a/doc/api/test.md b/doc/api/test.md index 4aeec3b4593eb8..2a41055d794409 100644 --- a/doc/api/test.md +++ b/doc/api/test.md @@ -1748,6 +1748,29 @@ describe('tests', async () => { }); ``` +## `assert` + + + +An object whose methods are used to configure available assertions on the +`TestContext` objects in the current process. The methods from `node:assert` +and snapshot testing functions are available by default. + +It is possible to apply the same configuration to all files by placing common +configuration code in a module +preloaded with `--require` or `--import`. + +### `assert.register(name, fn)` + + + +Defines a new assertion function with the provided name and function. If an +assertion already exists with the same name, it is overwritten. + ## `snapshot` * `namedParameters` {Object} An optional object used to bind named parameters. The keys of this object are used to configure the mapping. -* `...anonymousParameters` {null|number|bigint|string|Buffer|Uint8Array} Zero or +* `...anonymousParameters` {null|number|bigint|string|Buffer|TypedArray|DataView} Zero or more values to bind to anonymous parameters. * Returns: {Array} An array of objects. Each object corresponds to a row returned by executing the prepared statement. The keys and values of each @@ -361,11 +365,15 @@ execution of this prepared statement. This property is a wrapper around * `namedParameters` {Object} An optional object used to bind named parameters. The keys of this object are used to configure the mapping. -* `...anonymousParameters` {null|number|bigint|string|Buffer|Uint8Array} Zero or +* `...anonymousParameters` {null|number|bigint|string|Buffer|TypedArray|DataView} Zero or more values to bind to anonymous parameters. * Returns: {Object|undefined} An object corresponding to the first row returned by executing the prepared statement. The keys and values of the object @@ -381,11 +389,15 @@ values in `namedParameters` and `anonymousParameters`. * `namedParameters` {Object} An optional object used to bind named parameters. The keys of this object are used to configure the mapping. -* `...anonymousParameters` {null|number|bigint|string|Buffer|Uint8Array} Zero or +* `...anonymousParameters` {null|number|bigint|string|Buffer|TypedArray|DataView} Zero or more values to bind to anonymous parameters. * Returns: {Iterator} An iterable iterator of objects. Each object corresponds to a row returned by executing the prepared statement. The keys and values of each @@ -400,11 +412,15 @@ the values in `namedParameters` and `anonymousParameters`. * `namedParameters` {Object} An optional object used to bind named parameters. The keys of this object are used to configure the mapping. -* `...anonymousParameters` {null|number|bigint|string|Buffer|Uint8Array} Zero or +* `...anonymousParameters` {null|number|bigint|string|Buffer|TypedArray|DataView} Zero or more values to bind to anonymous parameters. 
* Returns: {Object} * `changes`: {number|bigint} The number of rows modified, inserted, or deleted diff --git a/src/node_sqlite.cc b/src/node_sqlite.cc index abd85a98c5aebb..373931a76a54de 100644 --- a/src/node_sqlite.cc +++ b/src/node_sqlite.cc @@ -960,7 +960,7 @@ bool StatementSync::BindParams(const FunctionCallbackInfo& args) { int anon_idx = 1; int anon_start = 0; - if (args[0]->IsObject() && !args[0]->IsUint8Array()) { + if (args[0]->IsObject() && !args[0]->IsArrayBufferView()) { Local obj = args[0].As(); Local context = obj->GetIsolate()->GetCurrentContext(); Local keys; @@ -1065,7 +1065,7 @@ bool StatementSync::BindValue(const Local& value, const int index) { statement_, index, *val, val.length(), SQLITE_TRANSIENT); } else if (value->IsNull()) { r = sqlite3_bind_null(statement_, index); - } else if (value->IsUint8Array()) { + } else if (value->IsArrayBufferView()) { ArrayBufferViewContents buf(value); r = sqlite3_bind_blob( statement_, index, buf.data(), buf.length(), SQLITE_TRANSIENT); diff --git a/test/parallel/test-sqlite-typed-array-and-data-view.js b/test/parallel/test-sqlite-typed-array-and-data-view.js new file mode 100644 index 00000000000000..1cc75c541b6261 --- /dev/null +++ b/test/parallel/test-sqlite-typed-array-and-data-view.js @@ -0,0 +1,61 @@ +'use strict'; +require('../common'); +const tmpdir = require('../common/tmpdir'); +const { join } = require('node:path'); +const { DatabaseSync } = require('node:sqlite'); +const { suite, test } = require('node:test'); +let cnt = 0; + +tmpdir.refresh(); + +function nextDb() { + return join(tmpdir.path, `database-${cnt++}.db`); +} + +const arrayBuffer = new Uint8Array([1, 2, 3, 4, 5, 6, 7, 8]).buffer; +const TypedArrays = [ + ['Int8Array', Int8Array], + ['Uint8Array', Uint8Array], + ['Uint8ClampedArray', Uint8ClampedArray], + ['Int16Array', Int16Array], + ['Uint16Array', Uint16Array], + ['Int32Array', Int32Array], + ['Uint32Array', Uint32Array], + ['Float32Array', Float32Array], + ['Float64Array', Float64Array], + ['BigInt64Array', BigInt64Array], + ['BigUint64Array', BigUint64Array], + ['DataView', DataView], +]; + +suite('StatementSync with TypedArray/DataView', () => { + for (const [displayName, TypedArray] of TypedArrays) { + test(displayName, (t) => { + const db = new DatabaseSync(nextDb()); + t.after(() => { db.close(); }); + db.exec('CREATE TABLE test (data BLOB)'); + // insert + { + const stmt = db.prepare('INSERT INTO test VALUES (?)'); + stmt.run(new TypedArray(arrayBuffer)); + } + // select all + { + const stmt = db.prepare('SELECT * FROM test'); + const row = stmt.get(); + t.assert.ok(row.data instanceof Uint8Array); + t.assert.strictEqual(row.data.length, 8); + t.assert.deepStrictEqual(row.data, new Uint8Array(arrayBuffer)); + } + // query + { + const stmt = db.prepare('SELECT * FROM test WHERE data = ?'); + const rows = stmt.all(new TypedArray(arrayBuffer)); + t.assert.strictEqual(rows.length, 1); + t.assert.ok(rows[0].data instanceof Uint8Array); + t.assert.strictEqual(rows[0].data.length, 8); + t.assert.deepStrictEqual(rows[0].data, new Uint8Array(arrayBuffer)); + } + }); + } +}); From f185e8a34a94a652585612482c3514180f7b52b9 Mon Sep 17 00:00:00 2001 From: Chengzhong Wu Date: Sun, 5 Jan 2025 11:43:44 +0000 Subject: [PATCH 014/158] inspector: report loadingFinished until the response data is consumed The `Network.loadingFinished` should be deferred until the response is complete and the data is fully consumed. Also, report correct request url with the specified port by retrieving the host from the request headers. 
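A minimal sketch of observing the corrected event ordering from an in-process inspector session; it assumes the process was started with `--inspect=0 --experimental-network-inspection`, as in the updated test below:

```js
const inspector = require('node:inspector/promises');
const http = require('node:http');

// Local throwaway server so the example is self-contained.
const server = http.createServer((req, res) => {
  res.end('hello world\n');
});

server.listen(0, async () => {
  const session = new inspector.Session();
  session.connect();

  session.on('Network.responseReceived', ({ params }) => {
    console.log('responseReceived', params.response.url);
  });
  session.on('Network.loadingFinished', ({ params }) => {
    // With this change, emitted only after the response body is consumed.
    console.log('loadingFinished', params.requestId);
    session.disconnect();
    server.close();
  });

  await session.post('Network.enable');

  http.get({ port: server.address().port, path: '/' }, (res) => {
    res.resume(); // Consume the body so loadingFinished can fire.
  });
});
```

Since `Network.loadingFinished` now waits for the `'end'` event on the response, a client that never reads the body will not see the event, which matches how browsers report it.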
PR-URL: https://github.com/nodejs/node/pull/56372 Refs: https://github.com/nodejs/node/issues/53946 Reviewed-By: James M Snell Reviewed-By: Kohei Ueno --- lib/internal/inspector/network.js | 31 +++ lib/internal/inspector/network_http.js | 132 ++++++++++ lib/internal/inspector_network_tracking.js | 99 +------ src/node_builtins.cc | 2 + .../parallel/test-inspector-network-domain.js | 206 --------------- test/parallel/test-inspector-network-http.js | 241 ++++++++++++++++++ 6 files changed, 412 insertions(+), 299 deletions(-) create mode 100644 lib/internal/inspector/network.js create mode 100644 lib/internal/inspector/network_http.js delete mode 100644 test/parallel/test-inspector-network-domain.js create mode 100644 test/parallel/test-inspector-network-http.js diff --git a/lib/internal/inspector/network.js b/lib/internal/inspector/network.js new file mode 100644 index 00000000000000..18424bee569302 --- /dev/null +++ b/lib/internal/inspector/network.js @@ -0,0 +1,31 @@ +'use strict'; + +const { + NumberMAX_SAFE_INTEGER, + Symbol, +} = primordials; + +const { now } = require('internal/perf/utils'); +const kInspectorRequestId = Symbol('kInspectorRequestId'); + +/** + * Return a monotonically increasing time in seconds since an arbitrary point in the past. + * @returns {number} + */ +function getMonotonicTime() { + return now() / 1000; +} + +let requestId = 0; +function getNextRequestId() { + if (requestId === NumberMAX_SAFE_INTEGER) { + requestId = 0; + } + return `node-network-event-${++requestId}`; +}; + +module.exports = { + kInspectorRequestId, + getMonotonicTime, + getNextRequestId, +}; diff --git a/lib/internal/inspector/network_http.js b/lib/internal/inspector/network_http.js new file mode 100644 index 00000000000000..87a33b419b1aed --- /dev/null +++ b/lib/internal/inspector/network_http.js @@ -0,0 +1,132 @@ +'use strict'; + +const { + ArrayIsArray, + DateNow, + ObjectEntries, + String, + Symbol, +} = primordials; + +const { + kInspectorRequestId, + getMonotonicTime, + getNextRequestId, +} = require('internal/inspector/network'); +const dc = require('diagnostics_channel'); +const { Network } = require('inspector'); + +const kResourceType = 'Other'; +const kRequestUrl = Symbol('kRequestUrl'); + +// Convert a Headers object (Map) to a plain object (Map) +const convertHeaderObject = (headers = {}) => { + // The 'host' header that contains the host and port of the URL. + let host; + const dict = {}; + for (const { 0: key, 1: value } of ObjectEntries(headers)) { + if (key.toLowerCase() === 'host') { + host = value; + } + if (typeof value === 'string') { + dict[key] = value; + } else if (ArrayIsArray(value)) { + if (key.toLowerCase() === 'cookie') dict[key] = value.join('; '); + // ChromeDevTools frontend treats 'set-cookie' as a special case + // https://github.com/ChromeDevTools/devtools-frontend/blob/4275917f84266ef40613db3c1784a25f902ea74e/front_end/core/sdk/NetworkRequest.ts#L1368 + else if (key.toLowerCase() === 'set-cookie') dict[key] = value.join('\n'); + else dict[key] = value.join(', '); + } else { + dict[key] = String(value); + } + } + return [host, dict]; +}; + +/** + * When a client request starts, emit Network.requestWillBeSent event. 
+ * https://chromedevtools.github.io/devtools-protocol/1-3/Network/#event-requestWillBeSent + * @param {{ request: import('http').ClientRequest }} event + */ +function onClientRequestStart({ request }) { + request[kInspectorRequestId] = getNextRequestId(); + + const { 0: host, 1: headers } = convertHeaderObject(request.getHeaders()); + const url = `${request.protocol}//${host}${request.path}`; + request[kRequestUrl] = url; + + Network.requestWillBeSent({ + requestId: request[kInspectorRequestId], + timestamp: getMonotonicTime(), + wallTime: DateNow(), + request: { + url, + method: request.method, + headers, + }, + }); +} + +/** + * When a client request errors, emit Network.loadingFailed event. + * https://chromedevtools.github.io/devtools-protocol/1-3/Network/#event-loadingFailed + * @param {{ request: import('http').ClientRequest, error: any }} event + */ +function onClientRequestError({ request, error }) { + if (typeof request[kInspectorRequestId] !== 'string') { + return; + } + Network.loadingFailed({ + requestId: request[kInspectorRequestId], + timestamp: getMonotonicTime(), + type: kResourceType, + errorText: error.message, + }); +} + +/** + * When response headers are received, emit Network.responseReceived event. + * https://chromedevtools.github.io/devtools-protocol/1-3/Network/#event-responseReceived + * @param {{ request: import('http').ClientRequest, error: any }} event + */ +function onClientResponseFinish({ request, response }) { + if (typeof request[kInspectorRequestId] !== 'string') { + return; + } + Network.responseReceived({ + requestId: request[kInspectorRequestId], + timestamp: getMonotonicTime(), + type: kResourceType, + response: { + url: request[kRequestUrl], + status: response.statusCode, + statusText: response.statusMessage ?? '', + headers: convertHeaderObject(response.headers)[1], + }, + }); + + // Wait until the response body is consumed by user code. 
+ response.once('end', () => { + Network.loadingFinished({ + requestId: request[kInspectorRequestId], + timestamp: getMonotonicTime(), + }); + }); +} + +function enable() { + dc.subscribe('http.client.request.start', onClientRequestStart); + dc.subscribe('http.client.request.error', onClientRequestError); + dc.subscribe('http.client.response.finish', onClientResponseFinish); +} + +function disable() { + dc.unsubscribe('http.client.request.start', onClientRequestStart); + dc.unsubscribe('http.client.request.error', onClientRequestError); + dc.unsubscribe('http.client.response.finish', onClientResponseFinish); +} + +module.exports = { + enable, + disable, +}; diff --git a/lib/internal/inspector_network_tracking.js b/lib/internal/inspector_network_tracking.js index de325baf77eb42..9158bb48f745f8 100644 --- a/lib/internal/inspector_network_tracking.js +++ b/lib/internal/inspector_network_tracking.js @@ -1,102 +1,15 @@ 'use strict'; -const { - ArrayIsArray, - DateNow, - ObjectEntries, - String, -} = primordials; - -let dc; -let Network; - -let requestId = 0; -const getNextRequestId = () => `node-network-event-${++requestId}`; - -// Convert a Headers object (Map) to a plain object (Map) -const headerObjectToDictionary = (headers = {}) => { - const dict = {}; - for (const { 0: key, 1: value } of ObjectEntries(headers)) { - if (typeof value === 'string') { - dict[key] = value; - } else if (ArrayIsArray(value)) { - if (key.toLowerCase() === 'cookie') dict[key] = value.join('; '); - // ChromeDevTools frontend treats 'set-cookie' as a special case - // https://github.com/ChromeDevTools/devtools-frontend/blob/4275917f84266ef40613db3c1784a25f902ea74e/front_end/core/sdk/NetworkRequest.ts#L1368 - else if (key.toLowerCase() === 'set-cookie') dict[key] = value.join('\n'); - else dict[key] = value.join(', '); - } else { - dict[key] = String(value); - } - } - return dict; -}; - -function onClientRequestStart({ request }) { - const url = `${request.protocol}//${request.host}${request.path}`; - const wallTime = DateNow(); - const timestamp = wallTime / 1000; - request._inspectorRequestId = getNextRequestId(); - Network.requestWillBeSent({ - requestId: request._inspectorRequestId, - timestamp, - wallTime, - request: { - url, - method: request.method, - headers: headerObjectToDictionary(request.getHeaders()), - }, - }); -} - -function onClientRequestError({ request, error }) { - if (typeof request._inspectorRequestId !== 'string') { - return; - } - const timestamp = DateNow() / 1000; - Network.loadingFailed({ - requestId: request._inspectorRequestId, - timestamp, - type: 'Other', - errorText: error.message, - }); -} - -function onClientResponseFinish({ request, response }) { - if (typeof request._inspectorRequestId !== 'string') { - return; - } - const url = `${request.protocol}//${request.host}${request.path}`; - const timestamp = DateNow() / 1000; - Network.responseReceived({ - requestId: request._inspectorRequestId, - timestamp, - type: 'Other', - response: { - url, - status: response.statusCode, - statusText: response.statusMessage ?? 
'', - headers: headerObjectToDictionary(response.headers), - }, - }); - Network.loadingFinished({ - requestId: request._inspectorRequestId, - timestamp, - }); -} - function enable() { - dc ??= require('diagnostics_channel'); - Network ??= require('inspector').Network; - dc.subscribe('http.client.request.start', onClientRequestStart); - dc.subscribe('http.client.request.error', onClientRequestError); - dc.subscribe('http.client.response.finish', onClientResponseFinish); + require('internal/inspector/network_http').enable(); + // TODO: add undici request/websocket tracking. + // https://github.com/nodejs/node/issues/53946 } function disable() { - dc.unsubscribe('http.client.request.start', onClientRequestStart); - dc.unsubscribe('http.client.request.error', onClientRequestError); - dc.unsubscribe('http.client.response.finish', onClientResponseFinish); + require('internal/inspector/network_http').disable(); + // TODO: add undici request/websocket tracking. + // https://github.com/nodejs/node/issues/53946 } module.exports = { diff --git a/src/node_builtins.cc b/src/node_builtins.cc index e5955903261397..791c16ce3942d7 100644 --- a/src/node_builtins.cc +++ b/src/node_builtins.cc @@ -119,6 +119,8 @@ BuiltinLoader::BuiltinCategories BuiltinLoader::GetBuiltinCategories() const { builtin_categories.cannot_be_required = std::set { #if !HAVE_INSPECTOR "inspector", "inspector/promises", "internal/util/inspector", + "internal/inspector/network", "internal/inspector/network_http", + "internal/inspector_async_hook", "internal/inspector_network_tracking", #endif // !HAVE_INSPECTOR #if !NODE_USE_V8_PLATFORM || !defined(NODE_HAVE_I18N_SUPPORT) diff --git a/test/parallel/test-inspector-network-domain.js b/test/parallel/test-inspector-network-domain.js deleted file mode 100644 index d2a56dca95a4ff..00000000000000 --- a/test/parallel/test-inspector-network-domain.js +++ /dev/null @@ -1,206 +0,0 @@ -// Flags: --inspect=0 --experimental-network-inspection -'use strict'; -const common = require('../common'); - -common.skipIfInspectorDisabled(); - -const assert = require('node:assert'); -const { addresses } = require('../common/internet'); -const fixtures = require('../common/fixtures'); -const http = require('node:http'); -const https = require('node:https'); -const inspector = require('node:inspector/promises'); - -const session = new inspector.Session(); -session.connect(); - -const requestHeaders = { - 'accept-language': 'en-US', - 'Cookie': ['k1=v1', 'k2=v2'], - 'age': 1000, - 'x-header1': ['value1', 'value2'] -}; - -const setResponseHeaders = (res) => { - res.setHeader('server', 'node'); - res.setHeader('etag', 12345); - res.setHeader('Set-Cookie', ['key1=value1', 'key2=value2']); - res.setHeader('x-header2', ['value1', 'value2']); -}; - -const httpServer = http.createServer((req, res) => { - const path = req.url; - switch (path) { - case '/hello-world': - setResponseHeaders(res); - res.writeHead(200); - res.end('hello world\n'); - break; - default: - assert(false, `Unexpected path: ${path}`); - } -}); - -const httpsServer = https.createServer({ - key: fixtures.readKey('agent1-key.pem'), - cert: fixtures.readKey('agent1-cert.pem') -}, (req, res) => { - const path = req.url; - switch (path) { - case '/hello-world': - setResponseHeaders(res); - res.writeHead(200); - res.end('hello world\n'); - break; - default: - assert(false, `Unexpected path: ${path}`); - } -}); - -const terminate = () => { - session.disconnect(); - httpServer.close(); - httpsServer.close(); - inspector.close(); 
-}; - -const testHttpGet = () => new Promise((resolve, reject) => { - session.on('Network.requestWillBeSent', common.mustCall(({ params }) => { - assert.ok(params.requestId.startsWith('node-network-event-')); - assert.strictEqual(params.request.url, 'http://127.0.0.1/hello-world'); - assert.strictEqual(params.request.method, 'GET'); - assert.strictEqual(typeof params.request.headers, 'object'); - assert.strictEqual(params.request.headers['accept-language'], 'en-US'); - assert.strictEqual(params.request.headers.cookie, 'k1=v1; k2=v2'); - assert.strictEqual(params.request.headers.age, '1000'); - assert.strictEqual(params.request.headers['x-header1'], 'value1, value2'); - assert.strictEqual(typeof params.timestamp, 'number'); - assert.strictEqual(typeof params.wallTime, 'number'); - })); - session.on('Network.responseReceived', common.mustCall(({ params }) => { - assert.ok(params.requestId.startsWith('node-network-event-')); - assert.strictEqual(typeof params.timestamp, 'number'); - assert.strictEqual(params.type, 'Other'); - assert.strictEqual(params.response.status, 200); - assert.strictEqual(params.response.statusText, 'OK'); - assert.strictEqual(params.response.url, 'http://127.0.0.1/hello-world'); - assert.strictEqual(typeof params.response.headers, 'object'); - assert.strictEqual(params.response.headers.server, 'node'); - assert.strictEqual(params.response.headers.etag, '12345'); - assert.strictEqual(params.response.headers['set-cookie'], 'key1=value1\nkey2=value2'); - assert.strictEqual(params.response.headers['x-header2'], 'value1, value2'); - })); - session.on('Network.loadingFinished', common.mustCall(({ params }) => { - assert.ok(params.requestId.startsWith('node-network-event-')); - assert.strictEqual(typeof params.timestamp, 'number'); - resolve(); - })); - - http.get({ - host: '127.0.0.1', - port: httpServer.address().port, - path: '/hello-world', - headers: requestHeaders - }, common.mustCall()); -}); - -const testHttpsGet = () => new Promise((resolve, reject) => { - session.on('Network.requestWillBeSent', common.mustCall(({ params }) => { - assert.ok(params.requestId.startsWith('node-network-event-')); - assert.strictEqual(params.request.url, 'https://127.0.0.1/hello-world'); - assert.strictEqual(params.request.method, 'GET'); - assert.strictEqual(typeof params.request.headers, 'object'); - assert.strictEqual(params.request.headers['accept-language'], 'en-US'); - assert.strictEqual(params.request.headers.cookie, 'k1=v1; k2=v2'); - assert.strictEqual(params.request.headers.age, '1000'); - assert.strictEqual(params.request.headers['x-header1'], 'value1, value2'); - assert.strictEqual(typeof params.timestamp, 'number'); - assert.strictEqual(typeof params.wallTime, 'number'); - })); - session.on('Network.responseReceived', common.mustCall(({ params }) => { - assert.ok(params.requestId.startsWith('node-network-event-')); - assert.strictEqual(typeof params.timestamp, 'number'); - assert.strictEqual(params.type, 'Other'); - assert.strictEqual(params.response.status, 200); - assert.strictEqual(params.response.statusText, 'OK'); - assert.strictEqual(params.response.url, 'https://127.0.0.1/hello-world'); - assert.strictEqual(typeof params.response.headers, 'object'); - assert.strictEqual(params.response.headers.server, 'node'); - assert.strictEqual(params.response.headers.etag, '12345'); - assert.strictEqual(params.response.headers['set-cookie'], 'key1=value1\nkey2=value2'); - assert.strictEqual(params.response.headers['x-header2'], 'value1, value2'); - })); - 
session.on('Network.loadingFinished', common.mustCall(({ params }) => { - assert.ok(params.requestId.startsWith('node-network-event-')); - assert.strictEqual(typeof params.timestamp, 'number'); - resolve(); - })); - - https.get({ - host: '127.0.0.1', - port: httpsServer.address().port, - path: '/hello-world', - rejectUnauthorized: false, - headers: requestHeaders, - }, common.mustCall()); -}); - -const testHttpError = () => new Promise((resolve, reject) => { - session.on('Network.requestWillBeSent', common.mustCall()); - session.on('Network.loadingFailed', common.mustCall(({ params }) => { - assert.ok(params.requestId.startsWith('node-network-event-')); - assert.strictEqual(typeof params.timestamp, 'number'); - assert.strictEqual(params.type, 'Other'); - assert.strictEqual(typeof params.errorText, 'string'); - resolve(); - })); - session.on('Network.responseReceived', common.mustNotCall()); - session.on('Network.loadingFinished', common.mustNotCall()); - - http.get({ - host: addresses.INVALID_HOST, - }, common.mustNotCall()).on('error', common.mustCall()); -}); - - -const testHttpsError = () => new Promise((resolve, reject) => { - session.on('Network.requestWillBeSent', common.mustCall()); - session.on('Network.loadingFailed', common.mustCall(({ params }) => { - assert.ok(params.requestId.startsWith('node-network-event-')); - assert.strictEqual(typeof params.timestamp, 'number'); - assert.strictEqual(params.type, 'Other'); - assert.strictEqual(typeof params.errorText, 'string'); - resolve(); - })); - session.on('Network.responseReceived', common.mustNotCall()); - session.on('Network.loadingFinished', common.mustNotCall()); - - https.get({ - host: addresses.INVALID_HOST, - }, common.mustNotCall()).on('error', common.mustCall()); -}); - -const testNetworkInspection = async () => { - await testHttpGet(); - session.removeAllListeners(); - await testHttpsGet(); - session.removeAllListeners(); - await testHttpError(); - session.removeAllListeners(); - await testHttpsError(); - session.removeAllListeners(); -}; - -httpServer.listen(0, () => { - httpsServer.listen(0, async () => { - try { - await session.post('Network.enable'); - await testNetworkInspection(); - await session.post('Network.disable'); - } catch (e) { - assert.fail(e); - } finally { - terminate(); - } - }); -}); diff --git a/test/parallel/test-inspector-network-http.js b/test/parallel/test-inspector-network-http.js new file mode 100644 index 00000000000000..e1e987cdd71e28 --- /dev/null +++ b/test/parallel/test-inspector-network-http.js @@ -0,0 +1,241 @@ +// Flags: --inspect=0 --experimental-network-inspection +'use strict'; +const common = require('../common'); + +common.skipIfInspectorDisabled(); + +const assert = require('node:assert'); +const { once } = require('node:events'); +const { addresses } = require('../common/internet'); +const fixtures = require('../common/fixtures'); +const http = require('node:http'); +const https = require('node:https'); +const inspector = require('node:inspector/promises'); + +const session = new inspector.Session(); +session.connect(); + +const requestHeaders = { + 'accept-language': 'en-US', + 'Cookie': ['k1=v1', 'k2=v2'], + 'age': 1000, + 'x-header1': ['value1', 'value2'] +}; + +const setResponseHeaders = (res) => { + res.setHeader('server', 'node'); + res.setHeader('etag', 12345); + res.setHeader('Set-Cookie', ['key1=value1', 'key2=value2']); + res.setHeader('x-header2', ['value1', 'value2']); +}; + +const kTimeout = 1000; +const kDelta = 200; + +const handleRequest = (req, res) => { + const 
path = req.url; + switch (path) { + case '/hello-world': + setResponseHeaders(res); + res.writeHead(200); + // Ensure the header is sent. + res.write('\n'); + + setTimeout(() => { + res.end('hello world\n'); + }, kTimeout); + break; + default: + assert(false, `Unexpected path: ${path}`); + } +}; + +const httpServer = http.createServer(handleRequest); + +const httpsServer = https.createServer({ + key: fixtures.readKey('agent1-key.pem'), + cert: fixtures.readKey('agent1-cert.pem') +}, handleRequest); + +const terminate = () => { + session.disconnect(); + httpServer.close(); + httpsServer.close(); + inspector.close(); +}; + +function verifyRequestWillBeSent({ method, params }, expect) { + assert.strictEqual(method, 'Network.requestWillBeSent'); + + assert.ok(params.requestId.startsWith('node-network-event-')); + assert.strictEqual(params.request.url, expect.url); + assert.strictEqual(params.request.method, 'GET'); + assert.strictEqual(typeof params.request.headers, 'object'); + assert.strictEqual(params.request.headers['accept-language'], 'en-US'); + assert.strictEqual(params.request.headers.cookie, 'k1=v1; k2=v2'); + assert.strictEqual(params.request.headers.age, '1000'); + assert.strictEqual(params.request.headers['x-header1'], 'value1, value2'); + assert.strictEqual(typeof params.timestamp, 'number'); + assert.strictEqual(typeof params.wallTime, 'number'); + + return params; +} + +function verifyResponseReceived({ method, params }, expect) { + assert.strictEqual(method, 'Network.responseReceived'); + + assert.ok(params.requestId.startsWith('node-network-event-')); + assert.strictEqual(typeof params.timestamp, 'number'); + assert.strictEqual(params.type, 'Other'); + assert.strictEqual(params.response.status, 200); + assert.strictEqual(params.response.statusText, 'OK'); + assert.strictEqual(params.response.url, expect.url); + assert.strictEqual(typeof params.response.headers, 'object'); + assert.strictEqual(params.response.headers.server, 'node'); + assert.strictEqual(params.response.headers.etag, '12345'); + assert.strictEqual(params.response.headers['set-cookie'], 'key1=value1\nkey2=value2'); + assert.strictEqual(params.response.headers['x-header2'], 'value1, value2'); + + return params; +} + +function verifyLoadingFinished({ method, params }) { + assert.strictEqual(method, 'Network.loadingFinished'); + + assert.ok(params.requestId.startsWith('node-network-event-')); + assert.strictEqual(typeof params.timestamp, 'number'); + return params; +} + +function verifyLoadingFailed({ method, params }) { + assert.strictEqual(method, 'Network.loadingFailed'); + + assert.ok(params.requestId.startsWith('node-network-event-')); + assert.strictEqual(typeof params.timestamp, 'number'); + assert.strictEqual(params.type, 'Other'); + assert.strictEqual(typeof params.errorText, 'string'); +} + +async function testHttpGet() { + const url = `http://127.0.0.1:${httpServer.address().port}/hello-world`; + const requestWillBeSentFuture = once(session, 'Network.requestWillBeSent') + .then(([event]) => verifyRequestWillBeSent(event, { url })); + + const responseReceivedFuture = once(session, 'Network.responseReceived') + .then(([event]) => verifyResponseReceived(event, { url })); + + const loadingFinishedFuture = once(session, 'Network.loadingFinished') + .then(([event]) => verifyLoadingFinished(event)); + + http.get({ + host: '127.0.0.1', + port: httpServer.address().port, + path: '/hello-world', + headers: requestHeaders + }, common.mustCall((res) => { + // Dump the response. 
+ res.on('data', () => {}); + res.on('end', () => {}); + })); + + await requestWillBeSentFuture; + const responseReceived = await responseReceivedFuture; + const loadingFinished = await loadingFinishedFuture; + + const delta = (loadingFinished.timestamp - responseReceived.timestamp) * 1000; + assert.ok(delta > kDelta); +} + +async function testHttpsGet() { + const url = `https://127.0.0.1:${httpsServer.address().port}/hello-world`; + const requestWillBeSentFuture = once(session, 'Network.requestWillBeSent') + .then(([event]) => verifyRequestWillBeSent(event, { url })); + + const responseReceivedFuture = once(session, 'Network.responseReceived') + .then(([event]) => verifyResponseReceived(event, { url })); + + const loadingFinishedFuture = once(session, 'Network.loadingFinished') + .then(([event]) => verifyLoadingFinished(event)); + + https.get({ + host: '127.0.0.1', + port: httpsServer.address().port, + path: '/hello-world', + rejectUnauthorized: false, + headers: requestHeaders, + }, common.mustCall((res) => { + // Dump the response. + res.on('data', () => {}); + res.on('end', () => {}); + })); + + await requestWillBeSentFuture; + const responseReceived = await responseReceivedFuture; + const loadingFinished = await loadingFinishedFuture; + + const delta = (loadingFinished.timestamp - responseReceived.timestamp) * 1000; + assert.ok(delta > kDelta); +} + +async function testHttpError() { + const url = `http://${addresses.INVALID_HOST}/`; + const requestWillBeSentFuture = once(session, 'Network.requestWillBeSent') + .then(([event]) => verifyRequestWillBeSent(event, { url })); + session.on('Network.responseReceived', common.mustNotCall()); + session.on('Network.loadingFinished', common.mustNotCall()); + + const loadingFailedFuture = once(session, 'Network.loadingFailed') + .then(([event]) => verifyLoadingFailed(event)); + + http.get({ + host: addresses.INVALID_HOST, + headers: requestHeaders, + }, common.mustNotCall()).on('error', common.mustCall()); + + await requestWillBeSentFuture; + await loadingFailedFuture; +} + +async function testHttpsError() { + const url = `https://${addresses.INVALID_HOST}/`; + const requestWillBeSentFuture = once(session, 'Network.requestWillBeSent') + .then(([event]) => verifyRequestWillBeSent(event, { url })); + session.on('Network.responseReceived', common.mustNotCall()); + session.on('Network.loadingFinished', common.mustNotCall()); + + const loadingFailedFuture = once(session, 'Network.loadingFailed') + .then(([event]) => verifyLoadingFailed(event)); + + https.get({ + host: addresses.INVALID_HOST, + headers: requestHeaders, + }, common.mustNotCall()).on('error', common.mustCall()); + + await requestWillBeSentFuture; + await loadingFailedFuture; +} + +const testNetworkInspection = async () => { + await testHttpGet(); + session.removeAllListeners(); + await testHttpsGet(); + session.removeAllListeners(); + await testHttpError(); + session.removeAllListeners(); + await testHttpsError(); + session.removeAllListeners(); +}; + +httpServer.listen(0, () => { + httpsServer.listen(0, async () => { + try { + await session.post('Network.enable'); + await testNetworkInspection(); + await session.post('Network.disable'); + } catch (e) { + assert.fail(e); + } finally { + terminate(); + } + }); +}); From e1887d2c581bd48041e571dd2c0cf70e1578b87e Mon Sep 17 00:00:00 2001 From: James M Snell Date: Fri, 3 Jan 2025 11:11:31 -0800 Subject: [PATCH 015/158] src: use LocalVector in more places MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 
PR-URL: https://github.com/nodejs/node/pull/56457 Reviewed-By: Michaël Zasso Reviewed-By: Yagiz Nizipli --- src/crypto/crypto_util.h | 5 +++-- src/env.cc | 7 ++----- src/env.h | 11 ++++++++++- 3 files changed, 15 insertions(+), 8 deletions(-) diff --git a/src/crypto/crypto_util.h b/src/crypto/crypto_util.h index 5c717c6fdb0fc4..a72c0a2a908294 100644 --- a/src/crypto/crypto_util.h +++ b/src/crypto/crypto_util.h @@ -547,7 +547,8 @@ void ThrowCryptoError(Environment* env, class CipherPushContext { public: - inline explicit CipherPushContext(Environment* env) : env_(env) {} + inline explicit CipherPushContext(Environment* env) + : list_(env->isolate()), env_(env) {} inline void push_back(const char* str) { list_.emplace_back(OneByteString(env_->isolate(), str)); @@ -558,7 +559,7 @@ class CipherPushContext { } private: - std::vector> list_; + v8::LocalVector list_; Environment* env_; }; diff --git a/src/env.cc b/src/env.cc index d4426432d67ba6..f0f97244fdef63 100644 --- a/src/env.cc +++ b/src/env.cc @@ -176,11 +176,7 @@ bool AsyncHooks::pop_async_context(double async_id) { } #endif native_execution_async_resources_.resize(offset); - if (native_execution_async_resources_.size() < - native_execution_async_resources_.capacity() / 2 && - native_execution_async_resources_.size() > 16) { - native_execution_async_resources_.shrink_to_fit(); - } + native_execution_async_resources_.shrink_to_fit(); } if (js_execution_async_resources()->Length() > offset) [[unlikely]] { @@ -1694,6 +1690,7 @@ AsyncHooks::AsyncHooks(Isolate* isolate, const SerializeInfo* info) fields_(isolate, kFieldsCount, MAYBE_FIELD_PTR(info, fields)), async_id_fields_( isolate, kUidFieldsCount, MAYBE_FIELD_PTR(info, async_id_fields)), + native_execution_async_resources_(isolate), info_(info) { HandleScope handle_scope(isolate); if (info == nullptr) { diff --git a/src/env.h b/src/env.h index e67c34d31a8ce4..4082458cf6aad2 100644 --- a/src/env.h +++ b/src/env.h @@ -401,7 +401,16 @@ class AsyncHooks : public MemoryRetainer { void grow_async_ids_stack(); v8::Global js_execution_async_resources_; - std::vector> native_execution_async_resources_; + + // TODO(@jasnell): Note that this is technically illegal use of + // v8::Locals which should be kept on the stack. Here, the entries + // in this object grows and shrinks with the C stack, and entries + // will be in the right handle scopes, but v8::Locals are supposed + // to remain on the stack and not the heap. For general purposes + // this *should* be ok but may need to be looked at further should + // v8 become stricter in the future about v8::Locals being held in + // the stack. 
+ v8::LocalVector native_execution_async_resources_; // Non-empty during deserialization const SerializeInfo* info_ = nullptr; From aab53e69658f2c0eb45af3bd831fca723c0657c2 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Mon, 6 Jan 2025 02:22:27 -0500 Subject: [PATCH 016/158] worker: flush stdout and stderr on exit Signed-off-by: Matteo Collina PR-URL: https://github.com/nodejs/node/pull/56428 Reviewed-By: James M Snell Reviewed-By: Paolo Insogna --- .../bootstrap/switches/is_not_main_thread.js | 13 +++++++++- lib/internal/worker/io.js | 10 +++++--- .../test-worker-stdio-flush-inflight.js | 24 ++++++++++++++++++ test/parallel/test-worker-stdio-flush.js | 25 +++++++++++++++++++ 4 files changed, 68 insertions(+), 4 deletions(-) create mode 100644 test/parallel/test-worker-stdio-flush-inflight.js create mode 100644 test/parallel/test-worker-stdio-flush.js diff --git a/lib/internal/bootstrap/switches/is_not_main_thread.js b/lib/internal/bootstrap/switches/is_not_main_thread.js index 03aa7c3ebe12f2..6fa30aec748af0 100644 --- a/lib/internal/bootstrap/switches/is_not_main_thread.js +++ b/lib/internal/bootstrap/switches/is_not_main_thread.js @@ -33,11 +33,22 @@ process.removeListener('removeListener', stopListeningIfSignal); const { createWorkerStdio, + kStdioWantsMoreDataCallback, } = require('internal/worker/io'); let workerStdio; function lazyWorkerStdio() { - return workerStdio ??= createWorkerStdio(); + if (workerStdio === undefined) { + workerStdio = createWorkerStdio(); + process.on('exit', flushSync); + } + + return workerStdio; +} + +function flushSync() { + workerStdio.stdout[kStdioWantsMoreDataCallback](); + workerStdio.stderr[kStdioWantsMoreDataCallback](); } function getStdout() { return lazyWorkerStdio().stdout; } diff --git a/lib/internal/worker/io.js b/lib/internal/worker/io.js index 42b8845cec6711..2b28c6a2487b11 100644 --- a/lib/internal/worker/io.js +++ b/lib/internal/worker/io.js @@ -292,9 +292,13 @@ class WritableWorkerStdio extends Writable { chunks: ArrayPrototypeMap(chunks, ({ chunk, encoding }) => ({ chunk, encoding })), }); - ArrayPrototypePush(this[kWritableCallbacks], cb); - if (this[kPort][kWaitingStreams]++ === 0) - this[kPort].ref(); + if (process._exiting) { + cb(); + } else { + ArrayPrototypePush(this[kWritableCallbacks], cb); + if (this[kPort][kWaitingStreams]++ === 0) + this[kPort].ref(); + } } _final(cb) { diff --git a/test/parallel/test-worker-stdio-flush-inflight.js b/test/parallel/test-worker-stdio-flush-inflight.js new file mode 100644 index 00000000000000..34b81152811e7b --- /dev/null +++ b/test/parallel/test-worker-stdio-flush-inflight.js @@ -0,0 +1,24 @@ +'use strict'; +const common = require('../common'); +const assert = require('assert'); +const { Worker, isMainThread } = require('worker_threads'); + +if (isMainThread) { + const w = new Worker(__filename, { stdout: true }); + const expected = 'hello world'; + + let data = ''; + w.stdout.setEncoding('utf8'); + w.stdout.on('data', (chunk) => { + data += chunk; + }); + + w.on('exit', common.mustCall(() => { + assert.strictEqual(data, expected); + })); +} else { + process.stdout.write('hello'); + process.stdout.write(' '); + process.stdout.write('world'); + process.exit(0); +} diff --git a/test/parallel/test-worker-stdio-flush.js b/test/parallel/test-worker-stdio-flush.js new file mode 100644 index 00000000000000..e52e721fc69483 --- /dev/null +++ b/test/parallel/test-worker-stdio-flush.js @@ -0,0 +1,25 @@ +'use strict'; +const common = require('../common'); +const assert = 
require('assert'); +const { Worker, isMainThread } = require('worker_threads'); + +if (isMainThread) { + const w = new Worker(__filename, { stdout: true }); + const expected = 'hello world'; + + let data = ''; + w.stdout.setEncoding('utf8'); + w.stdout.on('data', (chunk) => { + data += chunk; + }); + + w.on('exit', common.mustCall(() => { + assert.strictEqual(data, expected); + })); +} else { + process.on('exit', () => { + process.stdout.write(' '); + process.stdout.write('world'); + }); + process.stdout.write('hello'); +} From 295db19ba2709f5b799e184aa6c20ef577eb242c Mon Sep 17 00:00:00 2001 From: sebastianas Date: Mon, 6 Jan 2025 16:58:13 +0100 Subject: [PATCH 017/158] test: update error code in tls-psk-circuit for for OpenSSL 3.4 Update parallel/test-tls-psk-circuit.js to account for error code changes in OpenSSL 3.4 and probably later. Signed-off-by: Sebastian Andrzej Siewior PR-URL: https://github.com/nodejs/node/pull/56420 Reviewed-By: Luigi Pinca Reviewed-By: Richard Lau --- test/parallel/test-tls-psk-circuit.js | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/parallel/test-tls-psk-circuit.js b/test/parallel/test-tls-psk-circuit.js index e93db3eb1b4923..c06e61c321ef67 100644 --- a/test/parallel/test-tls-psk-circuit.js +++ b/test/parallel/test-tls-psk-circuit.js @@ -66,7 +66,8 @@ const expectedHandshakeErr = common.hasOpenSSL(3, 2) ? 'ERR_SSL_SSL/TLS_ALERT_HANDSHAKE_FAILURE' : 'ERR_SSL_SSLV3_ALERT_HANDSHAKE_FAILURE'; test({ psk: USERS.UserB, identity: 'UserC' }, {}, expectedHandshakeErr); // Recognized user but incorrect secret should fail handshake -const expectedIllegalParameterErr = common.hasOpenSSL(3, 2) ? - 'ERR_SSL_SSL/TLS_ALERT_ILLEGAL_PARAMETER' : 'ERR_SSL_SSLV3_ALERT_ILLEGAL_PARAMETER'; +const expectedIllegalParameterErr = common.hasOpenSSL(3, 4) ? 'ERR_SSL_TLSV1_ALERT_DECRYPT_ERROR' : + common.hasOpenSSL(3, 2) ? + 'ERR_SSL_SSL/TLS_ALERT_ILLEGAL_PARAMETER' : 'ERR_SSL_SSLV3_ALERT_ILLEGAL_PARAMETER'; test({ psk: USERS.UserA, identity: 'UserB' }, {}, expectedIllegalParameterErr); test({ psk: USERS.UserB, identity: 'UserB' }); From edafab7248610f7e45abe92d33dcddf5b2e67924 Mon Sep 17 00:00:00 2001 From: Chengzhong Wu Date: Tue, 7 Jan 2025 11:03:55 +0000 Subject: [PATCH 018/158] src: drain platform tasks before creating startup snapshot Drain the loop and platform tasks before creating a snapshot. This is necessary to ensure that the no roots are held by the the platform tasks, which may reference objects associated with a context. For example, a WeakRef may schedule an per-isolate platform task as a GC root, and referencing an object in a context, causing an assertion in the snapshot creator. PR-URL: https://github.com/nodejs/node/pull/56403 Refs: https://github.com/nodejs/node/pull/56292 Reviewed-By: James M Snell Reviewed-By: Joyee Cheung --- src/node_snapshotable.cc | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/src/node_snapshotable.cc b/src/node_snapshotable.cc index fe04a8ee8d708b..fe3fcc7184205f 100644 --- a/src/node_snapshotable.cc +++ b/src/node_snapshotable.cc @@ -973,25 +973,29 @@ ExitCode BuildSnapshotWithoutCodeCache( } }); + Context::Scope context_scope(setup->context()); + Environment* env = setup->env(); + // Run the custom main script for fully customized snapshots. 
if (snapshot_type == SnapshotMetadata::Type::kFullyCustomized) { - Context::Scope context_scope(setup->context()); - Environment* env = setup->env(); #if HAVE_INSPECTOR env->InitializeInspector({}); #endif if (LoadEnvironment(env, builder_script_content.value()).IsEmpty()) { return ExitCode::kGenericUserError; } + } - // FIXME(joyeecheung): right now running the loop in the snapshot - // builder might introduce inconsistencies in JS land that need to - // be synchronized again after snapshot restoration. - ExitCode exit_code = - SpinEventLoopInternal(env).FromMaybe(ExitCode::kGenericUserError); - if (exit_code != ExitCode::kNoFailure) { - return exit_code; - } + // Drain the loop and platform tasks before creating a snapshot. This is + // necessary to ensure that the no roots are held by the the platform + // tasks, which may reference objects associated with a context. For + // example, a WeakRef may schedule an per-isolate platform task as a GC + // root, and referencing an object in a context, causing an assertion in + // the snapshot creator. + ExitCode exit_code = + SpinEventLoopInternal(env).FromMaybe(ExitCode::kGenericUserError); + if (exit_code != ExitCode::kNoFailure) { + return exit_code; } } From 2a0fbd873175371def57d6b2dc6f53d725b8484c Mon Sep 17 00:00:00 2001 From: Xiao-Tao Date: Wed, 8 Jan 2025 00:31:45 +0800 Subject: [PATCH 019/158] tools: fix loong64 build failed PR-URL: https://github.com/nodejs/node/pull/56466 Reviewed-By: James M Snell Reviewed-By: Luigi Pinca --- tools/v8_gypfiles/v8.gyp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/v8_gypfiles/v8.gyp b/tools/v8_gypfiles/v8.gyp index 88c1297b9a09ec..9ccab9214a650c 100644 --- a/tools/v8_gypfiles/v8.gyp +++ b/tools/v8_gypfiles/v8.gyp @@ -1230,6 +1230,11 @@ '<(V8_ROOT)/src/trap-handler/handler-outside-posix.cc', ], }], + ['(_toolset=="host" and host_arch=="x64" or _toolset=="target" and target_arch=="x64") and (OS=="linux")', { + 'sources': [ + '<(V8_ROOT)/src/trap-handler/handler-outside-simulator.cc', + ], + }], ], }], ], From 7dd8165b0be3b3441c2e8216f3a069e0395dbb7d Mon Sep 17 00:00:00 2001 From: Joyee Cheung Date: Tue, 7 Jan 2025 17:49:10 +0100 Subject: [PATCH 020/158] src: lock the thread properly in snapshot builder Otherwise it can crash DCHECK when V8 expects that at least someone is locking the current thread. 
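For context, a minimal sketch of the RAII locking pattern this fix relies on (illustrative only, not the exact snapshot-builder code; the function name below is hypothetical):

```cpp
// Illustrative sketch only; EnterIsolateForSnapshotWork is a hypothetical name.
// A v8::Locker must be alive on the stack before a thread touches the isolate,
// otherwise debug builds can hit the DCHECK described above.
#include <v8.h>

void EnterIsolateForSnapshotWork(v8::Isolate* isolate) {
  v8::Locker locker(isolate);                 // acquire the per-isolate lock
  v8::Isolate::Scope isolate_scope(isolate);  // enter the isolate on this thread
  v8::HandleScope handle_scope(isolate);      // scope for any Local handles
  // ... create contexts, run the builder script, spin the event loop, etc.
}  // the lock is released when `locker` goes out of scope
```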
PR-URL: https://github.com/nodejs/node/pull/56327 Fixes: https://github.com/nodejs/node-v8/issues/294 Reviewed-By: James M Snell Reviewed-By: Chengzhong Wu Reviewed-By: Minwoo Jung Reviewed-By: Anna Henningsen --- src/node_snapshotable.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/node_snapshotable.cc b/src/node_snapshotable.cc index fe3fcc7184205f..f9acb7b1d1618e 100644 --- a/src/node_snapshotable.cc +++ b/src/node_snapshotable.cc @@ -962,6 +962,8 @@ ExitCode BuildSnapshotWithoutCodeCache( } Isolate* isolate = setup->isolate(); + v8::Locker locker(isolate); + { HandleScope scope(isolate); TryCatch bootstrapCatch(isolate); From aa3fd2f58f5e6b3a06a742d7d5cbc9c913cc64d7 Mon Sep 17 00:00:00 2001 From: James M Snell Date: Tue, 7 Jan 2025 13:08:55 -0800 Subject: [PATCH 021/158] src: make some minor ToLocalChecked cleanups PR-URL: https://github.com/nodejs/node/pull/56483 Reviewed-By: Yagiz Nizipli Reviewed-By: Daeyeon Jeong Reviewed-By: Anna Henningsen --- src/cares_wrap.cc | 48 ++++++++++++++++++++++++++--------------------- 1 file changed, 27 insertions(+), 21 deletions(-) diff --git a/src/cares_wrap.cc b/src/cares_wrap.cc index a19e8221f34eba..e79f43d1824b60 100644 --- a/src/cares_wrap.cc +++ b/src/cares_wrap.cc @@ -1580,15 +1580,16 @@ void ConvertIpv6StringToBuffer(const FunctionCallbackInfo& args) { if (uv_inet_pton(AF_INET6, *ip, dst) != 0) { isolate->ThrowException(Exception::Error( - String::NewFromUtf8(isolate, "Invalid IPv6 address").ToLocalChecked())); + FIXED_ONE_BYTE_STRING(isolate, "Invalid IPv6 address"))); return; } - Local buffer = - node::Buffer::Copy( + Local buffer; + if (node::Buffer::Copy( isolate, reinterpret_cast(dst), sizeof(dst)) - .ToLocalChecked(); - args.GetReturnValue().Set(buffer); + .ToLocal(&buffer)) { + args.GetReturnValue().Set(buffer); + } } void GetAddrInfo(const FunctionCallbackInfo& args) { @@ -1750,22 +1751,27 @@ void SetServers(const FunctionCallbackInfo& args) { int err; for (uint32_t i = 0; i < len; i++) { - CHECK(arr->Get(env->context(), i).ToLocalChecked()->IsArray()); - - Local elm = arr->Get(env->context(), i).ToLocalChecked().As(); - - CHECK(elm->Get(env->context(), - 0).ToLocalChecked()->Int32Value(env->context()).FromJust()); - CHECK(elm->Get(env->context(), 1).ToLocalChecked()->IsString()); - CHECK(elm->Get(env->context(), - 2).ToLocalChecked()->Int32Value(env->context()).FromJust()); - - int fam = elm->Get(env->context(), 0) - .ToLocalChecked()->Int32Value(env->context()).FromJust(); - node::Utf8Value ip(env->isolate(), - elm->Get(env->context(), 1).ToLocalChecked()); - int port = elm->Get(env->context(), 2) - .ToLocalChecked()->Int32Value(env->context()).FromJust(); + Local val; + if (!arr->Get(env->context(), i).ToLocal(&val)) return; + CHECK(val->IsArray()); + + Local elm = val.As(); + + Local familyValue; + Local ipValue; + Local portValue; + + if (!elm->Get(env->context(), 0).ToLocal(&familyValue)) return; + if (!elm->Get(env->context(), 1).ToLocal(&ipValue)) return; + if (!elm->Get(env->context(), 2).ToLocal(&portValue)) return; + + CHECK(familyValue->Int32Value(env->context()).FromJust()); + CHECK(ipValue->IsString()); + CHECK(portValue->Int32Value(env->context()).FromJust()); + + int fam = familyValue->Int32Value(env->context()).FromJust(); + node::Utf8Value ip(env->isolate(), ipValue); + int port = portValue->Int32Value(env->context()).FromJust(); ares_addr_port_node* cur = &servers[i]; From 5e1ddd5d4caea3239c49e4931cae20c6220c83b5 Mon Sep 17 00:00:00 2001 From: James M Snell 
Date: Sun, 5 Jan 2025 13:48:35 -0800 Subject: [PATCH 022/158] src: fixup more ToLocalChecked uses in node_file PR-URL: https://github.com/nodejs/node/pull/56484 Reviewed-By: Daeyeon Jeong Reviewed-By: Anna Henningsen Reviewed-By: Yagiz Nizipli --- src/node_file.cc | 70 +++++++++++++++++++++++++----------------------- 1 file changed, 36 insertions(+), 34 deletions(-) diff --git a/src/node_file.cc b/src/node_file.cc index 6d097904f67b89..1b56d2323c9526 100644 --- a/src/node_file.cc +++ b/src/node_file.cc @@ -1406,16 +1406,15 @@ static void ReadLink(const FunctionCallbackInfo& args) { const char* link_path = static_cast(req_wrap_sync.req.ptr); Local error; - MaybeLocal rc = StringBytes::Encode(isolate, - link_path, - encoding, - &error); - if (rc.IsEmpty()) { + Local ret; + if (!StringBytes::Encode(isolate, link_path, encoding, &error) + .ToLocal(&ret)) { + DCHECK(!error.IsEmpty()); env->isolate()->ThrowException(error); return; } - args.GetReturnValue().Set(rc.ToLocalChecked()); + args.GetReturnValue().Set(ret); } } @@ -1916,15 +1915,16 @@ static void MKDir(const FunctionCallbackInfo& args) { } if (!req_wrap_sync.continuation_data()->first_path().empty()) { Local error; + Local ret; std::string first_path(req_wrap_sync.continuation_data()->first_path()); - MaybeLocal path = StringBytes::Encode(env->isolate(), - first_path.c_str(), - UTF8, &error); - if (path.IsEmpty()) { + if (!StringBytes::Encode( + env->isolate(), first_path.c_str(), UTF8, &error) + .ToLocal(&ret)) { + DCHECK(!error.IsEmpty()); env->isolate()->ThrowException(error); return; } - args.GetReturnValue().Set(path.ToLocalChecked()); + args.GetReturnValue().Set(ret); } } else { SyncCallAndThrowOnError(env, &req_wrap_sync, uv_fs_mkdir, *path, mode); @@ -1965,16 +1965,15 @@ static void RealPath(const FunctionCallbackInfo& args) { const char* link_path = static_cast(req_wrap_sync.req.ptr); Local error; - MaybeLocal rc = StringBytes::Encode(isolate, - link_path, - encoding, - &error); - if (rc.IsEmpty()) { + Local ret; + if (!StringBytes::Encode(isolate, link_path, encoding, &error) + .ToLocal(&ret)) { + DCHECK(!error.IsEmpty()); env->isolate()->ThrowException(error); return; } - args.GetReturnValue().Set(rc.ToLocalChecked()); + args.GetReturnValue().Set(ret); } } @@ -2061,17 +2060,15 @@ static void ReadDir(const FunctionCallbackInfo& args) { } Local error; - MaybeLocal filename = StringBytes::Encode(isolate, - ent.name, - encoding, - &error); - - if (filename.IsEmpty()) { + Local fn; + if (!StringBytes::Encode(isolate, ent.name, encoding, &error) + .ToLocal(&fn)) { + DCHECK(!error.IsEmpty()); isolate->ThrowException(error); return; } - name_v.push_back(filename.ToLocalChecked()); + name_v.push_back(fn); if (with_types) { type_v.emplace_back(Integer::New(isolate, ent.type)); @@ -3092,13 +3089,14 @@ static void Mkdtemp(const FunctionCallbackInfo& args) { return; } Local error; - MaybeLocal rc = - StringBytes::Encode(isolate, req_wrap_sync.req.path, encoding, &error); - if (rc.IsEmpty()) { + Local ret; + if (!StringBytes::Encode(isolate, req_wrap_sync.req.path, encoding, &error) + .ToLocal(&ret)) { + DCHECK(!error.IsEmpty()); env->isolate()->ThrowException(error); return; } - args.GetReturnValue().Set(rc.ToLocalChecked()); + args.GetReturnValue().Set(ret); } } @@ -3410,9 +3408,11 @@ void BindingData::LegacyMainResolve(const FunctionCallbackInfo& args) { for (int i = 0; i < legacy_main_extensions_with_main_end; i++) { file_path = *initial_file_path + std::string(legacy_main_extensions[i]); // TODO(anonrig): Remove this 
when ToNamespacedPath supports std::string - Local local_file_path = - Buffer::Copy(env->isolate(), file_path.c_str(), file_path.size()) - .ToLocalChecked(); + Local local_file_path; + if (!Buffer::Copy(env->isolate(), file_path.c_str(), file_path.size()) + .ToLocal(&local_file_path)) { + return; + } BufferValue buff_file_path(isolate, local_file_path); ToNamespacedPath(env, &buff_file_path); @@ -3445,9 +3445,11 @@ void BindingData::LegacyMainResolve(const FunctionCallbackInfo& args) { i++) { file_path = *initial_file_path + std::string(legacy_main_extensions[i]); // TODO(anonrig): Remove this when ToNamespacedPath supports std::string - Local local_file_path = - Buffer::Copy(env->isolate(), file_path.c_str(), file_path.size()) - .ToLocalChecked(); + Local local_file_path; + if (!Buffer::Copy(env->isolate(), file_path.c_str(), file_path.size()) + .ToLocal(&local_file_path)) { + return; + } BufferValue buff_file_path(isolate, local_file_path); ToNamespacedPath(env, &buff_file_path); From 1994eaaf52e1ec6212720f58b0b185bbf133433c Mon Sep 17 00:00:00 2001 From: James M Snell Date: Fri, 3 Jan 2025 15:22:12 -0800 Subject: [PATCH 023/158] crypto: make generatePrime/checkPrime interruptible The `generatePrime` and `checkPrime` functions in the `crypto` module are only somewhat interruptible. This change makes it possible to interrupt these more reliably. Note that generating overly large primes can still take a long time and may not be interruptible as this mechanism relies on a callback to check for stopping conditions but OpenSSL may perform a long running operation without calling the callback right away. Fixes: https://github.com/nodejs/node/issues/56449 PR-URL: https://github.com/nodejs/node/pull/56460 Reviewed-By: Yagiz Nizipli Reviewed-By: Antoine du Hamel --- doc/api/crypto.md | 14 +++++++++++ src/crypto/crypto_random.cc | 40 ++++++++++++++++++++++-------- test/parallel/test-crypto-prime.js | 16 ++++++++++++ 3 files changed, 59 insertions(+), 11 deletions(-) diff --git a/doc/api/crypto.md b/doc/api/crypto.md index ecd379f694e441..966ba964ba2efe 100644 --- a/doc/api/crypto.md +++ b/doc/api/crypto.md @@ -3934,6 +3934,13 @@ By default, the prime is encoded as a big-endian sequence of octets in an {ArrayBuffer}. If the `bigint` option is `true`, then a {bigint} is provided. +The `size` of the prime will have a direct impact on how long it takes to +generate the prime. The larger the size, the longer it will take. Because +we use OpenSSL's `BN_generate_prime_ex` function, which provides only +minimal control over our ability to interrupt the generation process, +it is not recommended to generate overly large primes, as doing so may make +the process unresponsive. + ### `crypto.generatePrimeSync(size[, options])` + +* `value` {any} A value to serialize to a string. If Node.js was started with + the [`--test-update-snapshots`][] flag, the serialized value is written to + `path`. Otherwise, the serialized value is compared to the contents of the + existing snapshot file. +* `path` {string} The file where the serialized `value` is written. +* `options` {Object} Optional configuration options. The following properties + are supported: + * `serializers` {Array} An array of synchronous functions used to serialize + `value` into a string. `value` is passed as the only argument to the first + serializer function. The return value of each serializer is passed as input + to the next serializer. Once all serializers have run, the resulting value + is coerced to a string. 
**Default:** If no serializers are provided, the + test runner's default serializers are used. + +This function serializes `value` and writes it to the file specified by `path`. + +```js +test('snapshot test with default serialization', (t) => { + t.assert.fileSnapshot({ value1: 1, value2: 2 }, './snapshots/snapshot.json'); +}); +``` + +This function differs from `context.assert.snapshot()` in the following ways: + +* The snapshot file path is explicitly provided by the user. +* Each snapshot file is limited to a single snapshot value. +* No additional escaping is performed by the test runner. + +These differences allow snapshot files to better support features such as syntax +highlighting. + #### `context.assert.snapshot(value[, options])` +> Stability: 1 - Experimental + * `maybeRefable` {any} An object that may be "refable". An object is "refable" if it implements the Node.js "Refable protocol". -Specifically, this means that the object implements the `Symbol.for('node:ref')` -and `Symbol.for('node:unref')` methods. "Ref'd" objects will keep the Node.js +Specifically, this means that the object implements the `Symbol.for('nodejs.ref')` +and `Symbol.for('nodejs.unref')` methods. "Ref'd" objects will keep the Node.js event loop alive, while "unref'd" objects will not. Historically, this was implemented by using `ref()` and `unref()` methods directly on the objects. This pattern, however, is being deprecated in favor of the "Refable protocol" @@ -4291,11 +4293,13 @@ In [`Worker`][] threads, `process.umask(mask)` will throw an exception. added: v23.6.0 --> +> Stability: 1 - Experimental + * `maybeUnfefable` {any} An object that may be "unref'd". An object is "unrefable" if it implements the Node.js "Refable protocol". -Specifically, this means that the object implements the `Symbol.for('node:ref')` -and `Symbol.for('node:unref')` methods. "Ref'd" objects will keep the Node.js +Specifically, this means that the object implements the `Symbol.for('nodejs.ref')` +and `Symbol.for('nodejs.unref')` methods. "Ref'd" objects will keep the Node.js event loop alive, while "unref'd" objects will not. Historically, this was implemented by using `ref()` and `unref()` methods directly on the objects. 
This pattern, however, is being deprecated in favor of the "Refable protocol" diff --git a/lib/internal/process/per_thread.js b/lib/internal/process/per_thread.js index 0921f583183d71..134e99e2374722 100644 --- a/lib/internal/process/per_thread.js +++ b/lib/internal/process/per_thread.js @@ -421,12 +421,14 @@ function toggleTraceCategoryState(asyncHooksEnabled) { const { arch, platform, version } = process; function ref(maybeRefable) { - const fn = maybeRefable?.[SymbolFor('node:ref')] || maybeRefable?.ref; + const fn = maybeRefable?.[SymbolFor('nodejs.ref')] || maybeRefable?.[SymbolFor('node:ref')] || maybeRefable?.ref; if (typeof fn === 'function') FunctionPrototypeCall(fn, maybeRefable); } function unref(maybeRefable) { - const fn = maybeRefable?.[SymbolFor('node:unref')] || maybeRefable?.unref; + const fn = maybeRefable?.[SymbolFor('nodejs.unref')] || + maybeRefable?.[SymbolFor('node:unref')] || + maybeRefable?.unref; if (typeof fn === 'function') FunctionPrototypeCall(fn, maybeRefable); } diff --git a/test/parallel/test-process-ref-unref.js b/test/parallel/test-process-ref-unref.js index e9db4d56eefc58..6bd508c1dbb9cb 100644 --- a/test/parallel/test-process-ref-unref.js +++ b/test/parallel/test-process-ref-unref.js @@ -23,6 +23,18 @@ class Foo { } class Foo2 { + refCalled = 0; + unrefCalled = 0; + [Symbol.for('nodejs.ref')]() { + this.refCalled++; + } + [Symbol.for('nodejs.unref')]() { + this.unrefCalled++; + } +} + +// TODO(aduh95): remove support for undocumented symbol +class Foo3 { refCalled = 0; unrefCalled = 0; [Symbol.for('node:ref')]() { @@ -39,14 +51,19 @@ describe('process.ref/unref work as expected', () => { // just work. const foo1 = new Foo(); const foo2 = new Foo2(); + const foo3 = new Foo3(); process.ref(foo1); process.unref(foo1); process.ref(foo2); process.unref(foo2); + process.ref(foo3); + process.unref(foo3); strictEqual(foo1.refCalled, 1); strictEqual(foo1.unrefCalled, 1); strictEqual(foo2.refCalled, 1); strictEqual(foo2.unrefCalled, 1); + strictEqual(foo3.refCalled, 1); + strictEqual(foo3.unrefCalled, 1); // Objects that implement the legacy API also just work. const i = setInterval(() => {}, 1000); From 92eeeb98a5fb2be817b3658cfa7d87150b4a2732 Mon Sep 17 00:00:00 2001 From: Rafael Gonzaga Date: Fri, 10 Jan 2025 14:02:12 -0300 Subject: [PATCH 042/158] doc: include CVE to EOL lines as sec release process MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Refs: https://github.com/nodejs/security-wg/issues/1401 PR-URL: https://github.com/nodejs/node/pull/56520 Reviewed-By: Richard Lau Reviewed-By: Luigi Pinca Reviewed-By: Marco Ippolito Reviewed-By: Trivikram Kamat Reviewed-By: Ulises Gascón --- doc/contributing/security-release-process.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/contributing/security-release-process.md b/doc/contributing/security-release-process.md index 3508180e0d5687..d8a871bd96922c 100644 --- a/doc/contributing/security-release-process.md +++ b/doc/contributing/security-release-process.md @@ -65,6 +65,8 @@ The current security stewards are documented in the main Node.js * [ ] 4\. **Requesting CVEs:** * Request CVEs for the reports with `git node security --request-cve`. * Make sure to have a green CI before requesting a CVE. + * Check if there is a need to issue a CVE for any version that became + EOL after the last security release through [this issue](https://github.com/nodejs/security-wg/issues/1419). * [ ] 5\. 
**Choosing or Updating Release Date:** * Get agreement on the planned date for the release. From 4e4b0c63d0efb570b38773080104f30e22855ff0 Mon Sep 17 00:00:00 2001 From: Colin Ihrig Date: Fri, 10 Jan 2025 19:03:08 -0500 Subject: [PATCH 043/158] doc: fix location of NO_COLOR in CLI docs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The 'coverage output' and 'source map cache' sections were appearing under the NO_COLOR environment variable instead of the NODE_V8_COVERAGE enviroment variable where they were intended to be. This commit fixes that issue. PR-URL: https://github.com/nodejs/node/pull/56525 Reviewed-By: Antoine du Hamel Reviewed-By: Luigi Pinca Reviewed-By: Ulises Gascón Reviewed-By: James M Snell --- doc/api/cli.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/api/cli.md b/doc/api/cli.md index 82a86bbab7ad48..10e95dc69945cb 100644 --- a/doc/api/cli.md +++ b/doc/api/cli.md @@ -3350,11 +3350,6 @@ easier to instrument applications that call the `child_process.spawn()` family of functions. `NODE_V8_COVERAGE` can be set to an empty string, to prevent propagation. -### `NO_COLOR=` - -[`NO_COLOR`][] is an alias for `NODE_DISABLE_COLORS`. The value of the -environment variable is arbitrary. - #### Coverage output Coverage is output as an array of [ScriptCoverage][] objects on the top-level @@ -3420,6 +3415,11 @@ and the line lengths of the source file (in the key `lineLengths`). } ``` +### `NO_COLOR=` + +[`NO_COLOR`][] is an alias for `NODE_DISABLE_COLORS`. The value of the +environment variable is arbitrary. + ### `OPENSSL_CONF=file` > Stability: 1.0 - Early development diff --git a/doc/api/module.md b/doc/api/module.md index faa8e3d030ddd9..ace26adc6c6fce 100644 --- a/doc/api/module.md +++ b/doc/api/module.md @@ -177,6 +177,10 @@ added: - v20.6.0 - v18.19.0 changes: + - version: v23.6.1 + pr-url: https://github.com/nodejs-private/node-private/pull/629 + description: Using this feature with the permission model enabled requires + passing `--allow-worker`. - version: - v20.8.0 - v18.19.0 @@ -205,6 +209,8 @@ changes: Register a module that exports [hooks][] that customize Node.js module resolution and loading behavior. See [Customization hooks][]. +This feature requires `--allow-worker` if used with the [Permission Model][]. + ### `module.registerHooks(options)` + + + Permissions can be used to control what system resources the Node.js process has access to or what actions the process can take with those resources. @@ -26,12 +30,16 @@ If you find a potential security vulnerability, please refer to our ### Permission Model - + > Stability: 2 - Stable. - - The Node.js Permission Model is a mechanism for restricting access to specific resources during execution. 
The API exists behind a flag [`--permission`][] which when enabled, From df78515664bf2df67523f56ce88b0e79f02e77ae Mon Sep 17 00:00:00 2001 From: Mattias Buelens <649348+MattiasBuelens@users.noreply.github.com> Date: Mon, 13 Jan 2025 16:11:47 +0100 Subject: [PATCH 059/158] stream: fix typo in ReadableStreamBYOBReader.readIntoRequests PR-URL: https://github.com/nodejs/node/pull/56560 Reviewed-By: James M Snell Reviewed-By: Luigi Pinca --- lib/internal/webstreams/readablestream.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/internal/webstreams/readablestream.js b/lib/internal/webstreams/readablestream.js index 8d884c43c2f9c3..f9b9e6b4fb2c3e 100644 --- a/lib/internal/webstreams/readablestream.js +++ b/lib/internal/webstreams/readablestream.js @@ -925,7 +925,7 @@ class ReadableStreamBYOBReader { throw new ERR_INVALID_ARG_TYPE('stream', 'ReadableStream', stream); this[kState] = { stream: undefined, - requestIntoRequests: [], + readIntoRequests: [], close: { promise: undefined, resolve: undefined, @@ -1031,7 +1031,7 @@ class ReadableStreamBYOBReader { [kInspect](depth, options) { return customInspect(depth, options, this[kType], { stream: this[kState].stream, - requestIntoRequests: this[kState].requestIntoRequests.length, + readIntoRequests: this[kState].readIntoRequests.length, close: this[kState].close.promise, }); } From 48c813fb67d0065ee5b1c336be4e5a652e52c92f Mon Sep 17 00:00:00 2001 From: Rafael Gonzaga Date: Mon, 13 Jan 2025 13:51:34 -0300 Subject: [PATCH 060/158] src: add --disable-sigusr1 to prevent signal i/o thread MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This commit adds a new flag `--disable-sigusr1` to prevent the SignalIOThread to be up listening the SIGUSR1 events and then starting the debugging session. PR-URL: https://github.com/nodejs/node/pull/56441 Reviewed-By: Juan José Arboleda Reviewed-By: James M Snell --- doc/api/cli.md | 14 +++++++++++++ src/env-inl.h | 3 ++- src/node_options.cc | 5 +++++ src/node_options.h | 1 + test/fixtures/disable-signal/sigusr1.js | 2 ++ test/parallel/test-disable-sigusr1.js | 26 +++++++++++++++++++++++++ 6 files changed, 50 insertions(+), 1 deletion(-) create mode 100644 test/fixtures/disable-signal/sigusr1.js create mode 100644 test/parallel/test-disable-sigusr1.js diff --git a/doc/api/cli.md b/doc/api/cli.md index a8732e5dac598a..b2e20da8fc161b 100644 --- a/doc/api/cli.md +++ b/doc/api/cli.md @@ -573,6 +573,17 @@ Disable the `Object.prototype.__proto__` property. If `mode` is `delete`, the property is removed entirely. If `mode` is `throw`, accesses to the property throw an exception with the code `ERR_PROTO_ACCESS`. +### `--disable-sigusr1` + + + +> Stability: 1.2 - Release candidate + +Disable the ability of starting a debugging session by sending a +`SIGUSR1` signal to the process. + ### `--disable-warning=code-or-type` > Stability: 1.1 - Active development @@ -1471,6 +1482,7 @@ added: v7.6.0 Set the `host:port` to be used when the inspector is activated. Useful when activating the inspector by sending the `SIGUSR1` signal. +Except when [`--disable-sigusr1`][] is passed. Default host is `127.0.0.1`. If port `0` is specified, a random available port will be used. @@ -3082,6 +3094,7 @@ one is included in the list below. 
* `--conditions`, `-C` * `--diagnostic-dir` * `--disable-proto` +* `--disable-sigusr1` * `--disable-warning` * `--disable-wasm-trap-handler` * `--dns-result-order` @@ -3660,6 +3673,7 @@ node --stack-trace-limit=12 -p -e "Error.stackTraceLimit" # prints 12 [`--build-snapshot`]: #--build-snapshot [`--cpu-prof-dir`]: #--cpu-prof-dir [`--diagnostic-dir`]: #--diagnostic-dirdirectory +[`--disable-sigusr1`]: #--disable-sigusr1 [`--env-file-if-exists`]: #--env-file-if-existsconfig [`--env-file`]: #--env-fileconfig [`--experimental-addon-modules`]: #--experimental-addon-modules diff --git a/src/env-inl.h b/src/env-inl.h index d266eca6fc3300..9a8216354e646e 100644 --- a/src/env-inl.h +++ b/src/env-inl.h @@ -666,7 +666,8 @@ inline bool Environment::no_global_search_paths() const { } inline bool Environment::should_start_debug_signal_handler() const { - return (flags_ & EnvironmentFlags::kNoStartDebugSignalHandler) == 0; + return ((flags_ & EnvironmentFlags::kNoStartDebugSignalHandler) == 0) && + !options_->disable_sigusr1; } inline bool Environment::no_browser_globals() const { diff --git a/src/node_options.cc b/src/node_options.cc index d21ddb6a45d43c..cd0bfc8ca5d69e 100644 --- a/src/node_options.cc +++ b/src/node_options.cc @@ -386,6 +386,11 @@ EnvironmentOptionsParser::EnvironmentOptionsParser() { " (default: current working directory)", &EnvironmentOptions::diagnostic_dir, kAllowedInEnvvar); + AddOption("--disable-sigusr1", + "Disable inspector thread to be listening for SIGUSR1 signal", + &EnvironmentOptions::disable_sigusr1, + kAllowedInEnvvar, + false); AddOption("--dns-result-order", "set default value of verbatim in dns.lookup. Options are " "'ipv4first' (IPv4 addresses are placed before IPv6 addresses) " diff --git a/src/node_options.h b/src/node_options.h index 8b9f8a825e61c4..04dbe965a57010 100644 --- a/src/node_options.h +++ b/src/node_options.h @@ -116,6 +116,7 @@ class EnvironmentOptions : public Options { bool abort_on_uncaught_exception = false; std::vector conditions; bool detect_module = true; + bool disable_sigusr1 = false; bool print_required_tla = false; bool require_module = true; std::string dns_result_order; diff --git a/test/fixtures/disable-signal/sigusr1.js b/test/fixtures/disable-signal/sigusr1.js new file mode 100644 index 00000000000000..b4deb246c8cc45 --- /dev/null +++ b/test/fixtures/disable-signal/sigusr1.js @@ -0,0 +1,2 @@ +console.log('pid is', process.pid); +setInterval(() => {}, 1000); \ No newline at end of file diff --git a/test/parallel/test-disable-sigusr1.js b/test/parallel/test-disable-sigusr1.js new file mode 100644 index 00000000000000..e1d15a25ee6505 --- /dev/null +++ b/test/parallel/test-disable-sigusr1.js @@ -0,0 +1,26 @@ +'use strict'; + +const common = require('../common'); +const fixtures = require('../common/fixtures'); +const { it } = require('node:test'); +const assert = require('node:assert'); +const { NodeInstance } = require('../common/inspector-helper.js'); + +common.skipIfInspectorDisabled(); + +it('should not attach a debugger with SIGUSR1', { skip: common.isWindows }, async () => { + const file = fixtures.path('disable-signal/sigusr1.js'); + const instance = new NodeInstance(['--disable-sigusr1'], undefined, file); + + instance.on('stderr', common.mustNotCall()); + const loggedPid = await new Promise((resolve) => { + instance.on('stdout', (data) => { + const matches = data.match(/pid is (\d+)/); + if (matches) resolve(Number(matches[1])); + }); + }); + + assert.ok(process.kill(instance.pid, 'SIGUSR1')); + assert.strictEqual(loggedPid, 
instance.pid); + assert.ok(await instance.kill()); +}); From 6fa6d699ff8ee90a468943680fb3bbe18b3c1846 Mon Sep 17 00:00:00 2001 From: Jelle van der Waa Date: Fri, 6 Dec 2024 18:24:41 +0100 Subject: [PATCH 061/158] test: make test-crypto-hash compatible with OpenSSL > 3.4.0 OpenSSL 3.4 has a breaking change where the outputLength is now mandatory for shake* hash algorithms. https://github.com/openssl/openssl/commit/b911fef216d1386210ec24e201d54d709528abb4 PR-URL: https://github.com/nodejs/node/pull/56160 Refs: https://github.com/nodejs/node/issues/56159 Reviewed-By: Antoine du Hamel Reviewed-By: Luigi Pinca Reviewed-By: James M Snell Reviewed-By: Richard Lau --- test/parallel/test-crypto-hash.js | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/test/parallel/test-crypto-hash.js b/test/parallel/test-crypto-hash.js index 83218c105a4596..ca8f630b4bb7e7 100644 --- a/test/parallel/test-crypto-hash.js +++ b/test/parallel/test-crypto-hash.js @@ -7,6 +7,7 @@ const assert = require('assert'); const crypto = require('crypto'); const fs = require('fs'); +const { hasOpenSSL } = common; const fixtures = require('../common/fixtures'); let cryptoType; @@ -182,19 +183,21 @@ assert.throws( // Test XOF hash functions and the outputLength option. { - // Default outputLengths. - assert.strictEqual(crypto.createHash('shake128').digest('hex'), - '7f9c2ba4e88f827d616045507605853e'); - assert.strictEqual(crypto.createHash('shake128', null).digest('hex'), - '7f9c2ba4e88f827d616045507605853e'); - assert.strictEqual(crypto.createHash('shake256').digest('hex'), - '46b9dd2b0ba88d13233b3feb743eeb24' + - '3fcd52ea62b81b82b50c27646ed5762f'); - assert.strictEqual(crypto.createHash('shake256', { outputLength: 0 }) - .copy() // Default outputLength. - .digest('hex'), - '46b9dd2b0ba88d13233b3feb743eeb24' + - '3fcd52ea62b81b82b50c27646ed5762f'); + // Default outputLengths. Since OpenSSL 3.4 an outputLength is mandatory + if (!hasOpenSSL(3, 4)) { + assert.strictEqual(crypto.createHash('shake128').digest('hex'), + '7f9c2ba4e88f827d616045507605853e'); + assert.strictEqual(crypto.createHash('shake128', null).digest('hex'), + '7f9c2ba4e88f827d616045507605853e'); + assert.strictEqual(crypto.createHash('shake256').digest('hex'), + '46b9dd2b0ba88d13233b3feb743eeb24' + + '3fcd52ea62b81b82b50c27646ed5762f'); + assert.strictEqual(crypto.createHash('shake256', { outputLength: 0 }) + .copy() // Default outputLength. + .digest('hex'), + '46b9dd2b0ba88d13233b3feb743eeb24' + + '3fcd52ea62b81b82b50c27646ed5762f'); + } // Short outputLengths. assert.strictEqual(crypto.createHash('shake128', { outputLength: 0 }) From 0d772a963e0ff50512dcc38c26b4adeda1ec7398 Mon Sep 17 00:00:00 2001 From: Jelle van der Waa Date: Fri, 6 Dec 2024 19:14:16 +0100 Subject: [PATCH 062/158] test: disable openssl 3.4.0 incompatible tests The shake128/shake256 hashing algorithms broke due to an OpenSSL 3.4 incompatible change and now throws an Error. 
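For reference, a small sketch of the workaround available to callers on OpenSSL 3.4 builds: pass an explicit `outputLength` so the XOF digest no longer relies on the removed defaults (illustrative snippet, not part of the patched test):

```js
// Illustrative only: with OpenSSL 3.4+, shake hashes need an explicit outputLength.
const crypto = require('node:crypto');

// 16 bytes matches the old shake128 default, 32 bytes the old shake256 default.
const d128 = crypto.createHash('shake128', { outputLength: 16 }).digest('hex');
const d256 = crypto.createHash('shake256', { outputLength: 32 }).digest('hex');
console.log(d128, d256);
```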
PR-URL: https://github.com/nodejs/node/pull/56160 Refs: https://github.com/nodejs/node/issues/56159 Reviewed-By: Antoine du Hamel Reviewed-By: Luigi Pinca Reviewed-By: James M Snell Reviewed-By: Richard Lau --- test/parallel/test-crypto-oneshot-hash.js | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/parallel/test-crypto-oneshot-hash.js b/test/parallel/test-crypto-oneshot-hash.js index 56b4c04a65a1c1..69051c43d9e882 100644 --- a/test/parallel/test-crypto-oneshot-hash.js +++ b/test/parallel/test-crypto-oneshot-hash.js @@ -31,6 +31,9 @@ const methods = crypto.getHashes(); const input = fs.readFileSync(fixtures.path('utf8_test_text.txt')); for (const method of methods) { + // Skip failing tests on OpenSSL 3.4.0 + if (method.startsWith('shake') && common.hasOpenSSL(3, 4)) + continue; for (const outputEncoding of ['buffer', 'hex', 'base64', undefined]) { const oldDigest = crypto.createHash(method).update(input).digest(outputEncoding || 'hex'); const digestFromBuffer = crypto.hash(method, input, outputEncoding); From b1c54439aee2805911f3a308f2cd04468fddb412 Mon Sep 17 00:00:00 2001 From: Colin Ihrig Date: Mon, 13 Jan 2025 15:51:55 -0500 Subject: [PATCH 063/158] test: update test-child-process-bad-stdio to use node:test This commit updates test/parallel/test-child-process-bad-stdio.js to use node:test. This change prevents multiple child processes from being spawned in parallel, which can be problematic in the CI. PR-URL: https://github.com/nodejs/node/pull/56562 Reviewed-By: Jake Yuesong Li Reviewed-By: James M Snell Reviewed-By: Yagiz Nizipli --- test/parallel/test-child-process-bad-stdio.js | 30 ++++++++++--------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/test/parallel/test-child-process-bad-stdio.js b/test/parallel/test-child-process-bad-stdio.js index 90e8ddd0215a2b..b612fc832281a6 100644 --- a/test/parallel/test-child-process-bad-stdio.js +++ b/test/parallel/test-child-process-bad-stdio.js @@ -1,21 +1,23 @@ 'use strict'; // Flags: --expose-internals const common = require('../common'); -const assert = require('assert'); -const cp = require('child_process'); if (process.argv[2] === 'child') { setTimeout(() => {}, common.platformTimeout(100)); return; } +const assert = require('node:assert'); +const cp = require('node:child_process'); +const { mock, test } = require('node:test'); +const { ChildProcess } = require('internal/child_process'); + // Monkey patch spawn() to create a child process normally, but destroy the // stdout and stderr streams. This replicates the conditions where the streams // cannot be properly created. -const ChildProcess = require('internal/child_process').ChildProcess; const original = ChildProcess.prototype.spawn; -ChildProcess.prototype.spawn = function() { +mock.method(ChildProcess.prototype, 'spawn', function() { const err = original.apply(this, arguments); this.stdout.destroy(); @@ -24,7 +26,7 @@ ChildProcess.prototype.spawn = function() { this.stderr = null; return err; -}; +}); function createChild(options, callback) { const [cmd, opts] = common.escapePOSIXShell`"${process.execPath}" "${__filename}" child`; @@ -33,32 +35,32 @@ function createChild(options, callback) { return cp.exec(cmd, options, common.mustCall(callback)); } -// Verify that normal execution of a child process is handled. 
-{ +test('normal execution of a child process is handled', (_, done) => { createChild({}, (err, stdout, stderr) => { assert.strictEqual(err, null); assert.strictEqual(stdout, ''); assert.strictEqual(stderr, ''); + done(); }); -} +}); -// Verify that execution with an error event is handled. -{ +test('execution with an error event is handled', (_, done) => { const error = new Error('foo'); const child = createChild({}, (err, stdout, stderr) => { assert.strictEqual(err, error); assert.strictEqual(stdout, ''); assert.strictEqual(stderr, ''); + done(); }); child.emit('error', error); -} +}); -// Verify that execution with a killed process is handled. -{ +test('execution with a killed process is handled', (_, done) => { createChild({ timeout: 1 }, (err, stdout, stderr) => { assert.strictEqual(err.killed, true); assert.strictEqual(stdout, ''); assert.strictEqual(stderr, ''); + done(); }); -} +}); From 6af5053153d11ab841e15d0b1242c308dae04e30 Mon Sep 17 00:00:00 2001 From: Antoine du Hamel Date: Mon, 13 Jan 2025 22:39:25 +0100 Subject: [PATCH 064/158] doc: document CLI way to open the nodejs/bluesky PR PR-URL: https://github.com/nodejs/node/pull/56506 Reviewed-By: James M Snell Reviewed-By: Michael Dawson Reviewed-By: Ruy Adorno --- doc/contributing/releases.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/doc/contributing/releases.md b/doc/contributing/releases.md index b3b20b8ae5589e..40ba96da602033 100644 --- a/doc/contributing/releases.md +++ b/doc/contributing/releases.md @@ -1102,6 +1102,22 @@ The post content can be as simple as: > … > something here about notable changes +You can create the PR for the release post on nodejs/bluesky with the following: + +```bash +# Create a PR for a post: +gh workflow run create-pr.yml --repo "https://github.com/nodejs/bluesky" \ + -F prTitle='vx.x.x release announcement' \ + -F richText='Node.js vx.x.x is out. Check the blog post at https://nodejs.org/…. TL;DR is + +- New feature +- …' + +# Create a PR for a retweet: +gh workflow run create-pr.yml --repo "https://github.com/nodejs/bluesky" \ + -F prTitle='Retweet vx.x.x release announcement' -F postURL=… +``` +
Security release From 3915152c36a15743e4bca67822f05f729faa05ea Mon Sep 17 00:00:00 2001 From: Santiago Gimeno Date: Sat, 11 Jan 2025 14:17:38 +0100 Subject: [PATCH 065/158] crypto: fix checkPrime crash with large buffers Fixes: https://github.com/nodejs/node/issues/56512 PR-URL: https://github.com/nodejs/node/pull/56559 Reviewed-By: Colin Ihrig Reviewed-By: James M Snell Reviewed-By: Joyee Cheung --- src/crypto/crypto_random.cc | 5 +++++ test/parallel/test-crypto-prime.js | 13 +++++++++++++ 2 files changed, 18 insertions(+) diff --git a/src/crypto/crypto_random.cc b/src/crypto/crypto_random.cc index a6a206455b52c3..0c26834de3126c 100644 --- a/src/crypto/crypto_random.cc +++ b/src/crypto/crypto_random.cc @@ -176,6 +176,11 @@ Maybe CheckPrimeTraits::AdditionalConfig( ArrayBufferOrViewContents candidate(args[offset]); params->candidate = BignumPointer(candidate.data(), candidate.size()); + if (!params->candidate) { + ThrowCryptoError( + Environment::GetCurrent(args), ERR_get_error(), "BignumPointer"); + return Nothing(); + } CHECK(args[offset + 1]->IsInt32()); // Checks params->checks = args[offset + 1].As()->Value(); diff --git a/test/parallel/test-crypto-prime.js b/test/parallel/test-crypto-prime.js index 2e7edb9074d090..5ffdc1394282be 100644 --- a/test/parallel/test-crypto-prime.js +++ b/test/parallel/test-crypto-prime.js @@ -254,6 +254,19 @@ for (const checks of [-(2 ** 31), -1, 2 ** 31, 2 ** 32 - 1, 2 ** 32, 2 ** 50]) { }); } +{ + const bytes = Buffer.alloc(67108864); + bytes[0] = 0x1; + assert.throws(() => checkPrime(bytes, common.mustNotCall()), { + code: 'ERR_OSSL_BN_BIGNUM_TOO_LONG', + message: /bignum too long/ + }); + assert.throws(() => checkPrimeSync(bytes), { + code: 'ERR_OSSL_BN_BIGNUM_TOO_LONG', + message: /bignum too long/ + }); +} + assert(!checkPrimeSync(Buffer.from([0x1]))); assert(checkPrimeSync(Buffer.from([0x2]))); assert(checkPrimeSync(Buffer.from([0x3]))); From 36dd9ecc41522adbf37f4e7f295efc20c67cf729 Mon Sep 17 00:00:00 2001 From: "Node.js GitHub Bot" Date: Mon, 13 Jan 2025 19:55:57 -0500 Subject: [PATCH 066/158] crypto: update root certificates to NSS 3.107 This is the certdata.txt[0] from NSS 3.107. This is the version of NSS that shipped in Firefox 134.0 on 2025-01-07. 
Certificates removed: - SecureSign RootCA11 - Entrust Root Certification Authority - G4 - Security Communication RootCA3 [0] https://raw.githubusercontent.com/nss-dev/nss/refs/tags/NSS_3_107_RTM/lib/ckfw/builtins/certdata.txt PR-URL: https://github.com/nodejs/node/pull/56566 Reviewed-By: Luigi Pinca Reviewed-By: Richard Lau Reviewed-By: James M Snell --- src/node_root_certs.h | 86 ----------- tools/certdata.txt | 343 ++++-------------------------------------- 2 files changed, 33 insertions(+), 396 deletions(-) diff --git a/src/node_root_certs.h b/src/node_root_certs.h index 2c8670be39e586..ee229fc7740627 100644 --- a/src/node_root_certs.h +++ b/src/node_root_certs.h @@ -569,27 +569,6 @@ "dZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E=\n" "-----END CERTIFICATE-----", -/* SecureSign RootCA11 */ -"-----BEGIN CERTIFICATE-----\n" -"MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDErMCkGA1UE\n" -"ChMiSmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoGA1UEAxMTU2VjdXJl\n" -"U2lnbiBSb290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0MDgwNDU2NDdaMFgxCzAJBgNV\n" -"BAYTAkpQMSswKQYDVQQKEyJKYXBhbiBDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzLCBJbmMuMRww\n" -"GgYDVQQDExNTZWN1cmVTaWduIFJvb3RDQTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB\n" -"CgKCAQEA/XeqpRyQBTvLTJszi1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1y\n" -"fIw/XwFndBWW4wI8h9uuywGOwvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyK\n" -"yiyhFTOVMdrAG/LuYpmGYz+/3ZMqg6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9\n" -"UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rPO7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni8McDWc/V\n" -"1uinMrPmmECGxc0nEovMe863ETxiYAcjPitAbpSACW22s293bzUIUPsCh8U+iQIDAQABo0Iw\n" -"QDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZXt94wDgYDVR0PAQH/BAQDAgEGMA8GA1Ud\n" -"EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAKChOBZmLqdWHyGcBvod7bkixTgm2E5P\n" -"7KN/ed5GIaGHd48HCJqypMWvDzKYC3xmKbabfSVSSUOrTC4rbnpwrxYO4wJs+0LmGJ1F2FXI\n" -"6Dvd5+H0LgscNFxsWEr7jIhQX5Ucv+2rIrVls4W6ng+4reV6G4pQOh29Dbx7VFALuUKvVaAY\n" -"ga1lme++5Jy/xIWrQbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01y8hSyn+B/tlr0/cR\n" -"7SXf+Of5pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061lgeLKBObjBmN\n" -"QSdJQO7e5iNEOdyhIta6A/I=\n" -"-----END CERTIFICATE-----", - /* Microsec e-Szigno Root CA 2009 */ "-----BEGIN CERTIFICATE-----\n" "MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYDVQQGEwJI\n" @@ -2310,40 +2289,6 @@ "UvToiIMrVCjU8jVStDKDYmlkDJGcn5fqdBb9HxEGmpv0\n" "-----END CERTIFICATE-----", -/* Entrust Root Certification Authority - G4 */ -"-----BEGIN CERTIFICATE-----\n" -"MIIGSzCCBDOgAwIBAgIRANm1Q3+vqTkPAAAAAFVlrVgwDQYJKoZIhvcNAQELBQAwgb4xCzAJ\n" -"BgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVu\n" -"dHJ1c3QubmV0L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxNSBFbnRydXN0LCBJbmMu\n" -"IC0gZm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMjAwBgNVBAMTKUVudHJ1c3QgUm9vdCBDZXJ0\n" -"aWZpY2F0aW9uIEF1dGhvcml0eSAtIEc0MB4XDTE1MDUyNzExMTExNloXDTM3MTIyNzExNDEx\n" -"Nlowgb4xCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9T\n" -"ZWUgd3d3LmVudHJ1c3QubmV0L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxNSBFbnRy\n" -"dXN0LCBJbmMuIC0gZm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMjAwBgNVBAMTKUVudHJ1c3Qg\n" -"Um9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEc0MIICIjANBgkqhkiG9w0BAQEFAAOC\n" -"Ag8AMIICCgKCAgEAsewsQu7i0TD/pZJH4i3DumSXbcr3DbVZwbPLqGgZ2K+EbTBwXX7zLtJT\n" -"meH+H17ZSK9dE43b/2MzTdMAArzE+NEGCJR5WIoV3imz/f3ET+iq4qA7ec2/a0My3dl0ELn3\n" -"9GjUu9CH1apLiipvKgS1sqbHoHrmSKvS0VnM1n4j5pds8ELl3FFLFUHtSUrJ3hCX1nbB76W1\n" -"NhSXNdh4IjVS70O92yfbYVaCNNzLiGAMC1rlLAHGVK/XqsEQe9IFWrhAnoanw5CGAlZSCXqc\n" 
-"0ieCU0plUmr1POeo8pyvi73TDtTUXm6Hnmo9RR3RXRv06QqsYJn7ibT/mCzPfB3pAqoEmh64\n" -"3IhuJbNsZvc8kPNXwbMv9W3y+8qh+CmdRouzavbmZwe+LGcKKh9asj5XxNMhIWNlUpEbsZmO\n" -"eX7m640A2Vqq6nPopIICR5b+W45UYaPrL0swsIsjdXJ8ITzI9vF01Bx7owVV7rtNOzK+mndm\n" -"nqxpkCIHH2E6lr7lmk/MBTwoWdPBDFSoWWG9yHJM6Nyfh3+9nEg2XpWjDrk4JFX8dWbrAuMI\n" -"NClKxuMrLzOg2qOGpRKX/YAr2hRC45K9PvJdXmd0LhyIRyk0X+IyqJwlN4y6mACXi0mWHv0l\n" -"iqzc2thddG5msP9E36EYxr5ILzeUePiVSj9/E15dWf10hkNjc0kCAwEAAaNCMEAwDwYDVR0T\n" -"AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJ84xFYjwznooHFs6FRM5Og6\n" -"sb9nMA0GCSqGSIb3DQEBCwUAA4ICAQAS5UKme4sPDORGpbZgQIeMJX6tuGguW8ZAdjwD+MlZ\n" -"9POrYs4QjbRaZIxowLByQzTSGwv2LFPSypBLhmb8qoMi9IsabyZIrHZ3CL/FmFz0Jomee8O5\n" -"ZDIBf9PD3Vht7LGrhFV0d4QEJ1JrhkzO3bll/9bGXp+aEJlLdWr+aumXIOTkdnrG0CSqkM0g\n" -"kLpHZPt/B7NTeLUKYvJzQ85BK4FqLoUWlFPUa19yIqtRLULVAJyZv967lDtX/Zr1hstWO1uI\n" -"AeV8KEsD+UmDfLJ/fOPtjqF/YFOOVZ1QNBIPt5d7bIdKROf1beyAN/BYGW5KaHbwH5Lk6rWS\n" -"02FREAutp9lfx1/cH6NcjKF+m7ee01ZvZl4HliDtC3T7Zk6LERXpgUl+b7DUUH8i119lAg2m\n" -"9IUe2K4GS0qn0jFmwvjO5QimpAKWRGhXxNUzzxkvFMSUHHuk2fCfDrGA4tGeEWSpiBE6doLl\n" -"YsKA2KSD7ZPvfC+QsDJMlhVoSFLUmQjAJOgc47OlIQ6SwJAfzyBfyjs4x7dtOvPmRLgOMWuI\n" -"jnDrnBdSqEGULoe256YSxXXfW8AKbnuk5F6G+TaU33fD6Q3AOfF5u0aOq0NZJ7cguyPpVkAh\n" -"7DE9ZapD8j3fcEThuk0mEDuYn/PIjhs4ViFqUZPTkcpG2om3PVODLAgfi49T3f+sHw==\n" -"-----END CERTIFICATE-----", - /* Microsoft ECC Root Certificate Authority 2017 */ "-----BEGIN CERTIFICATE-----\n" "MIICWTCCAd+gAwIBAgIQZvI9r4fei7FK6gxXMQHC7DAKBggqhkjOPQQDAzBlMQswCQYDVQQG\n" @@ -3161,37 +3106,6 @@ "Nzf43TNRnXCve1XYAS59BWQOhriR\n" "-----END CERTIFICATE-----", -/* Security Communication RootCA3 */ -"-----BEGIN CERTIFICATE-----\n" -"MIIFfzCCA2egAwIBAgIJAOF8N0D9G/5nMA0GCSqGSIb3DQEBDAUAMF0xCzAJBgNVBAYTAkpQ\n" -"MSUwIwYDVQQKExxTRUNPTSBUcnVzdCBTeXN0ZW1zIENPLixMVEQuMScwJQYDVQQDEx5TZWN1\n" -"cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTMwHhcNMTYwNjE2MDYxNzE2WhcNMzgwMTE4MDYx\n" -"NzE2WjBdMQswCQYDVQQGEwJKUDElMCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4s\n" -"TFRELjEnMCUGA1UEAxMeU2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EzMIICIjANBgkq\n" -"hkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48lySfcw3gl8qUCBWNO0Ot26YQ+TUG5pPDXC7ltz\n" -"kBtnTCHsXzW7OT4rCmDvu20rhvtxosis5FaU+cmvsXLUIKx00rgVrVH+hXShuRD+BYD5UpOz\n" -"QD11EKzAlrenfna84xtSGc4RHwsENPXY9Wk8d/Nk9A2qhd7gCVAEF5aEt8iKvE1y/By7z/MG\n" -"TfmfZPd+pmaGNXHIEYBMwXFAWB6+oHP2/D5Q4eAvJj1+XCO1eXDe+uDRpdYMQXF79+qMHIjH\n" -"7Iv10S9VlkZ8WjtYO/u62C21Jdp6Ts9EriGmnpjKIG58u4iFW/vAEGK78vknR+/RiTlDxN/e\n" -"4UG/VHMgly1s2vPUB6PmudhvrvyMGS7TZ2crldtYXLVqAvO4g160a75BflcJdURQVc1aEWEh\n" -"CmHCqYj9E7wtiS/NYeCVvsq1e+F7NGcLH7YMx3weGVPKp7FKFSBWFHA9K4IsD50VHUeAR/94\n" -"mQ4xr28+j+2GaR57GIgUssL8gjMunEst+3A7caoreyYn8xrC3PsXuKHqy6C0rtOUfnrQq8Ps\n" -"OC0RLoi/1D+tEjtCrI8Cbn3M0V9hvqG8OmpI6iZVIhZdXw3/JzOfGAN0iltSIEdrRU0id4xV\n" -"J/CvHozJgyJUt5rQT9nO/NkuHJYosQLTA70lUhw0Zk8jq/R3gpYd0VcwCBEF/VfR2ccCAwEA\n" -"AaNCMEAwHQYDVR0OBBYEFGQUfPxYchamCik0FW8qy7z8r6irMA4GA1UdDwEB/wQEAwIBBjAP\n" -"BgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBDAUAA4ICAQDcAiMI4u8hOscNtybSYpOnpSNy\n" -"ByCCYN8Y11StaSWSntkUz5m5UoHPrmyKO1o5yGwBQ8IibQLwYs1OY0PAFNr0Y/Dq9HHuTofj\n" -"can0yVflLl8cebsjqodEV+m9NU1Bu0soo5iyG9kLFwfl9+qd9XbXv8S2gVj/yP9kaWJ5rW4O\n" -"H3/uHWnlt3Jxs/6lATWUVCvAUm2PVcTJ0rjLyjQIUYWg9by0F1jqClx6vWPGOi//lkkZhOpn\n" -"2ASxYfQAW0q3nHE3GYV5v4GwxxMOdnE+OoAGrgYWp421wsTL/0ClXI2lyTrtcoHKXJg80jQD\n" -"dwj98ClZXSEIx2C/pHF7uNkegr4Jr2VvKKu/S7XuPghHJ6APbw+LP6yVGPO5DtxnVW5inkYO\n" -"0QR4ynKudtml+LLfiAlhi+8kTtFZP1rUPcmTPCtk9YENFpb3ksP+MW/oKjJ0DvRMmEoYDjBU\n" -"1cXrvMUVnuiZIesnKwkK2/HmcBhWuwzkvvnoEKQTkrgc4NtnHVMDpCKn3F2SEDzq//wbEBrD\n" 
-"2NCcnWXL0CsnMQMeNuE9dnUM/0Umud1RvCPHX9jYhxBAEg09ODfnRDwYwFMJZI//1ZqmfHAu\n" -"c1Uh6N//g7kdPjIe1qZ9LPFm6Vwdp6POXiUyK+OVrCoHzrQoeIY8LaadTdJ0MN1kURXbg4NR\n" -"16/9M51NZg==\n" -"-----END CERTIFICATE-----", - /* Security Communication ECC RootCA1 */ "-----BEGIN CERTIFICATE-----\n" "MIICODCCAb6gAwIBAgIJANZdm7N4gS7rMAoGCCqGSM49BAMDMGExCzAJBgNVBAYTAkpQMSUw\n" diff --git a/tools/certdata.txt b/tools/certdata.txt index 110a814718cfd7..e0f60abcd6cf62 100644 --- a/tools/certdata.txt +++ b/tools/certdata.txt @@ -323,7 +323,10 @@ CKA_VALUE MULTILINE_OCTAL \174\136\232\166\351\131\220\305\174\203\065\021\145\121 END CKA_NSS_MOZILLA_CA_POLICY CK_BBOOL CK_TRUE -CKA_NSS_SERVER_DISTRUST_AFTER CK_BBOOL CK_FALSE +# For Server Distrust After: Sat Nov 30 23:59:59 2024 +CKA_NSS_SERVER_DISTRUST_AFTER MULTILINE_OCTAL +\062\064\061\061\063\060\062\063\065\071\065\071\132 +END CKA_NSS_EMAIL_DISTRUST_AFTER CK_BBOOL CK_FALSE # Trust for "Entrust.net Premium 2048 Secure Server CA" @@ -627,7 +630,10 @@ CKA_VALUE MULTILINE_OCTAL \036\177\132\264\074 END CKA_NSS_MOZILLA_CA_POLICY CK_BBOOL CK_TRUE -CKA_NSS_SERVER_DISTRUST_AFTER CK_BBOOL CK_FALSE +# For Server Distrust After: Sat Nov 30 23:59:59 2024 +CKA_NSS_SERVER_DISTRUST_AFTER MULTILINE_OCTAL +\062\064\061\061\063\060\062\063\065\071\065\071\132 +END CKA_NSS_EMAIL_DISTRUST_AFTER CK_BBOOL CK_FALSE # Trust for "Entrust Root Certification Authority" @@ -3808,140 +3814,6 @@ CKA_TRUST_EMAIL_PROTECTION CK_TRUST CKT_NSS_TRUSTED_DELEGATOR CKA_TRUST_CODE_SIGNING CK_TRUST CKT_NSS_MUST_VERIFY_TRUST CKA_TRUST_STEP_UP_APPROVED CK_BBOOL CK_FALSE -# -# Certificate "SecureSign RootCA11" -# -# Issuer: CN=SecureSign RootCA11,O="Japan Certification Services, Inc.",C=JP -# Serial Number: 1 (0x1) -# Subject: CN=SecureSign RootCA11,O="Japan Certification Services, Inc.",C=JP -# Not Valid Before: Wed Apr 08 04:56:47 2009 -# Not Valid After : Sun Apr 08 04:56:47 2029 -# Fingerprint (SHA-256): BF:0F:EE:FB:9E:3A:58:1A:D5:F9:E9:DB:75:89:98:57:43:D2:61:08:5C:4D:31:4F:6F:5D:72:59:AA:42:16:12 -# Fingerprint (SHA1): 3B:C4:9F:48:F8:F3:73:A0:9C:1E:BD:F8:5B:B1:C3:65:C7:D8:11:B3 -CKA_CLASS CK_OBJECT_CLASS CKO_CERTIFICATE -CKA_TOKEN CK_BBOOL CK_TRUE -CKA_PRIVATE CK_BBOOL CK_FALSE -CKA_MODIFIABLE CK_BBOOL CK_FALSE -CKA_LABEL UTF8 "SecureSign RootCA11" -CKA_CERTIFICATE_TYPE CK_CERTIFICATE_TYPE CKC_X_509 -CKA_SUBJECT MULTILINE_OCTAL -\060\130\061\013\060\011\006\003\125\004\006\023\002\112\120\061 -\053\060\051\006\003\125\004\012\023\042\112\141\160\141\156\040 -\103\145\162\164\151\146\151\143\141\164\151\157\156\040\123\145 -\162\166\151\143\145\163\054\040\111\156\143\056\061\034\060\032 -\006\003\125\004\003\023\023\123\145\143\165\162\145\123\151\147 -\156\040\122\157\157\164\103\101\061\061 -END -CKA_ID UTF8 "0" -CKA_ISSUER MULTILINE_OCTAL -\060\130\061\013\060\011\006\003\125\004\006\023\002\112\120\061 -\053\060\051\006\003\125\004\012\023\042\112\141\160\141\156\040 -\103\145\162\164\151\146\151\143\141\164\151\157\156\040\123\145 -\162\166\151\143\145\163\054\040\111\156\143\056\061\034\060\032 -\006\003\125\004\003\023\023\123\145\143\165\162\145\123\151\147 -\156\040\122\157\157\164\103\101\061\061 -END -CKA_SERIAL_NUMBER MULTILINE_OCTAL -\002\001\001 -END -CKA_VALUE MULTILINE_OCTAL -\060\202\003\155\060\202\002\125\240\003\002\001\002\002\001\001 -\060\015\006\011\052\206\110\206\367\015\001\001\005\005\000\060 -\130\061\013\060\011\006\003\125\004\006\023\002\112\120\061\053 -\060\051\006\003\125\004\012\023\042\112\141\160\141\156\040\103 
-\145\162\164\151\146\151\143\141\164\151\157\156\040\123\145\162 -\166\151\143\145\163\054\040\111\156\143\056\061\034\060\032\006 -\003\125\004\003\023\023\123\145\143\165\162\145\123\151\147\156 -\040\122\157\157\164\103\101\061\061\060\036\027\015\060\071\060 -\064\060\070\060\064\065\066\064\067\132\027\015\062\071\060\064 -\060\070\060\064\065\066\064\067\132\060\130\061\013\060\011\006 -\003\125\004\006\023\002\112\120\061\053\060\051\006\003\125\004 -\012\023\042\112\141\160\141\156\040\103\145\162\164\151\146\151 -\143\141\164\151\157\156\040\123\145\162\166\151\143\145\163\054 -\040\111\156\143\056\061\034\060\032\006\003\125\004\003\023\023 -\123\145\143\165\162\145\123\151\147\156\040\122\157\157\164\103 -\101\061\061\060\202\001\042\060\015\006\011\052\206\110\206\367 -\015\001\001\001\005\000\003\202\001\017\000\060\202\001\012\002 -\202\001\001\000\375\167\252\245\034\220\005\073\313\114\233\063 -\213\132\024\105\244\347\220\026\321\337\127\322\041\020\244\027 -\375\337\254\326\037\247\344\333\174\367\354\337\270\003\332\224 -\130\375\135\162\174\214\077\137\001\147\164\025\226\343\002\074 -\207\333\256\313\001\216\302\363\146\306\205\105\364\002\306\072 -\265\142\262\257\372\234\277\244\346\324\200\060\230\363\015\266 -\223\217\251\324\330\066\362\260\374\212\312\054\241\025\063\225 -\061\332\300\033\362\356\142\231\206\143\077\277\335\223\052\203 -\250\166\271\023\037\267\316\116\102\205\217\042\347\056\032\362 -\225\011\262\005\265\104\116\167\241\040\275\251\362\116\012\175 -\120\255\365\005\015\105\117\106\161\375\050\076\123\373\004\330 -\055\327\145\035\112\033\372\317\073\260\061\232\065\156\310\213 -\006\323\000\221\362\224\010\145\114\261\064\006\000\172\211\342 -\360\307\003\131\317\325\326\350\247\062\263\346\230\100\206\305 -\315\047\022\213\314\173\316\267\021\074\142\140\007\043\076\053 -\100\156\224\200\011\155\266\263\157\167\157\065\010\120\373\002 -\207\305\076\211\002\003\001\000\001\243\102\060\100\060\035\006 -\003\125\035\016\004\026\004\024\133\370\115\117\262\245\206\324 -\072\322\361\143\232\240\276\011\366\127\267\336\060\016\006\003 -\125\035\017\001\001\377\004\004\003\002\001\006\060\017\006\003 -\125\035\023\001\001\377\004\005\060\003\001\001\377\060\015\006 -\011\052\206\110\206\367\015\001\001\005\005\000\003\202\001\001 -\000\240\241\070\026\146\056\247\126\037\041\234\006\372\035\355 -\271\042\305\070\046\330\116\117\354\243\177\171\336\106\041\241 -\207\167\217\007\010\232\262\244\305\257\017\062\230\013\174\146 -\051\266\233\175\045\122\111\103\253\114\056\053\156\172\160\257 -\026\016\343\002\154\373\102\346\030\235\105\330\125\310\350\073 -\335\347\341\364\056\013\034\064\134\154\130\112\373\214\210\120 -\137\225\034\277\355\253\042\265\145\263\205\272\236\017\270\255 -\345\172\033\212\120\072\035\275\015\274\173\124\120\013\271\102 -\257\125\240\030\201\255\145\231\357\276\344\234\277\304\205\253 -\101\262\124\157\334\045\315\355\170\342\216\014\215\011\111\335 -\143\173\132\151\226\002\041\250\275\122\131\351\175\065\313\310 -\122\312\177\201\376\331\153\323\367\021\355\045\337\370\347\371 -\244\372\162\227\204\123\015\245\320\062\030\121\166\131\024\154 -\017\353\354\137\200\214\165\103\203\303\205\230\377\114\236\055 -\015\344\167\203\223\116\265\226\007\213\050\023\233\214\031\215 -\101\047\111\100\356\336\346\043\104\071\334\241\042\326\272\003 -\362 -END -CKA_NSS_MOZILLA_CA_POLICY CK_BBOOL CK_TRUE -CKA_NSS_SERVER_DISTRUST_AFTER CK_BBOOL CK_FALSE -CKA_NSS_EMAIL_DISTRUST_AFTER CK_BBOOL CK_FALSE - -# Trust for "SecureSign RootCA11" 
-# Issuer: CN=SecureSign RootCA11,O="Japan Certification Services, Inc.",C=JP -# Serial Number: 1 (0x1) -# Subject: CN=SecureSign RootCA11,O="Japan Certification Services, Inc.",C=JP -# Not Valid Before: Wed Apr 08 04:56:47 2009 -# Not Valid After : Sun Apr 08 04:56:47 2029 -# Fingerprint (SHA-256): BF:0F:EE:FB:9E:3A:58:1A:D5:F9:E9:DB:75:89:98:57:43:D2:61:08:5C:4D:31:4F:6F:5D:72:59:AA:42:16:12 -# Fingerprint (SHA1): 3B:C4:9F:48:F8:F3:73:A0:9C:1E:BD:F8:5B:B1:C3:65:C7:D8:11:B3 -CKA_CLASS CK_OBJECT_CLASS CKO_NSS_TRUST -CKA_TOKEN CK_BBOOL CK_TRUE -CKA_PRIVATE CK_BBOOL CK_FALSE -CKA_MODIFIABLE CK_BBOOL CK_FALSE -CKA_LABEL UTF8 "SecureSign RootCA11" -CKA_CERT_SHA1_HASH MULTILINE_OCTAL -\073\304\237\110\370\363\163\240\234\036\275\370\133\261\303\145 -\307\330\021\263 -END -CKA_CERT_MD5_HASH MULTILINE_OCTAL -\267\122\164\342\222\264\200\223\362\165\344\314\327\362\352\046 -END -CKA_ISSUER MULTILINE_OCTAL -\060\130\061\013\060\011\006\003\125\004\006\023\002\112\120\061 -\053\060\051\006\003\125\004\012\023\042\112\141\160\141\156\040 -\103\145\162\164\151\146\151\143\141\164\151\157\156\040\123\145 -\162\166\151\143\145\163\054\040\111\156\143\056\061\034\060\032 -\006\003\125\004\003\023\023\123\145\143\165\162\145\123\151\147 -\156\040\122\157\157\164\103\101\061\061 -END -CKA_SERIAL_NUMBER MULTILINE_OCTAL -\002\001\001 -END -CKA_TRUST_SERVER_AUTH CK_TRUST CKT_NSS_TRUSTED_DELEGATOR -CKA_TRUST_EMAIL_PROTECTION CK_TRUST CKT_NSS_MUST_VERIFY_TRUST -CKA_TRUST_CODE_SIGNING CK_TRUST CKT_NSS_MUST_VERIFY_TRUST -CKA_TRUST_STEP_UP_APPROVED CK_BBOOL CK_FALSE - # # Certificate "Microsec e-Szigno Root CA 2009" # @@ -4939,7 +4811,10 @@ CKA_VALUE MULTILINE_OCTAL \007\072\027\144\265\004\265\043\041\231\012\225\073\227\174\357 END CKA_NSS_MOZILLA_CA_POLICY CK_BBOOL CK_TRUE -CKA_NSS_SERVER_DISTRUST_AFTER CK_BBOOL CK_FALSE +# For Server Distrust After: Sat Nov 30 23:59:59 2024 +CKA_NSS_SERVER_DISTRUST_AFTER MULTILINE_OCTAL +\062\064\061\061\063\060\062\063\065\071\065\071\132 +END CKA_NSS_EMAIL_DISTRUST_AFTER CK_BBOOL CK_FALSE # Trust for "AffirmTrust Commercial" @@ -5067,7 +4942,10 @@ CKA_VALUE MULTILINE_OCTAL \355\132\000\124\205\034\026\066\222\014\134\372\246\255\277\333 END CKA_NSS_MOZILLA_CA_POLICY CK_BBOOL CK_TRUE -CKA_NSS_SERVER_DISTRUST_AFTER CK_BBOOL CK_FALSE +# For Server Distrust After: Sat Nov 30 23:59:59 2024 +CKA_NSS_SERVER_DISTRUST_AFTER MULTILINE_OCTAL +\062\064\061\061\063\060\062\063\065\071\065\071\132 +END CKA_NSS_EMAIL_DISTRUST_AFTER CK_BBOOL CK_FALSE # Trust for "AffirmTrust Networking" @@ -5227,7 +5105,10 @@ CKA_VALUE MULTILINE_OCTAL \051\340\266\270\011\150\031\034\030\103 END CKA_NSS_MOZILLA_CA_POLICY CK_BBOOL CK_TRUE -CKA_NSS_SERVER_DISTRUST_AFTER CK_BBOOL CK_FALSE +# For Server Distrust After: Sat Nov 30 23:59:59 2024 +CKA_NSS_SERVER_DISTRUST_AFTER MULTILINE_OCTAL +\062\064\061\061\063\060\062\063\065\071\065\071\132 +END CKA_NSS_EMAIL_DISTRUST_AFTER CK_BBOOL CK_FALSE # Trust for "AffirmTrust Premium" @@ -5335,7 +5216,10 @@ CKA_VALUE MULTILINE_OCTAL \214\171 END CKA_NSS_MOZILLA_CA_POLICY CK_BBOOL CK_TRUE -CKA_NSS_SERVER_DISTRUST_AFTER CK_BBOOL CK_FALSE +# For Server Distrust After: Sat Nov 30 23:59:59 2024 +CKA_NSS_SERVER_DISTRUST_AFTER MULTILINE_OCTAL +\062\064\061\061\063\060\062\063\065\071\065\071\132 +END CKA_NSS_EMAIL_DISTRUST_AFTER CK_BBOOL CK_FALSE # Trust for "AffirmTrust Premium ECC" @@ -10269,7 +10153,10 @@ CKA_VALUE MULTILINE_OCTAL \105\366 END CKA_NSS_MOZILLA_CA_POLICY CK_BBOOL CK_TRUE -CKA_NSS_SERVER_DISTRUST_AFTER CK_BBOOL CK_FALSE +# For Server Distrust After: Sat Nov 
30 23:59:59 2024 +CKA_NSS_SERVER_DISTRUST_AFTER MULTILINE_OCTAL +\062\064\061\061\063\060\062\063\065\071\065\071\132 +END CKA_NSS_EMAIL_DISTRUST_AFTER CK_BBOOL CK_FALSE # Trust for "Entrust Root Certification Authority - G2" @@ -10416,7 +10303,10 @@ CKA_VALUE MULTILINE_OCTAL \231\267\046\101\133\045\140\256\320\110\032\356\006 END CKA_NSS_MOZILLA_CA_POLICY CK_BBOOL CK_TRUE -CKA_NSS_SERVER_DISTRUST_AFTER CK_BBOOL CK_FALSE +# For Server Distrust After: Sat Nov 30 23:59:59 2024 +CKA_NSS_SERVER_DISTRUST_AFTER MULTILINE_OCTAL +\062\064\061\061\063\060\062\063\065\071\065\071\132 +END CKA_NSS_EMAIL_DISTRUST_AFTER CK_BBOOL CK_FALSE # Trust for "Entrust Root Certification Authority - EC1" @@ -15014,7 +14904,7 @@ CKA_SERIAL_NUMBER MULTILINE_OCTAL \002\021\000\331\265\103\177\257\251\071\017\000\000\000\000\125 \145\255\130 END -CKA_TRUST_SERVER_AUTH CK_TRUST CKT_NSS_TRUSTED_DELEGATOR +CKA_TRUST_SERVER_AUTH CK_TRUST CKT_NSS_MUST_VERIFY_TRUST CKA_TRUST_EMAIL_PROTECTION CK_TRUST CKT_NSS_TRUSTED_DELEGATOR CKA_TRUST_CODE_SIGNING CK_TRUST CKT_NSS_MUST_VERIFY_TRUST CKA_TRUST_STEP_UP_APPROVED CK_BBOOL CK_FALSE @@ -21228,173 +21118,6 @@ CKA_TRUST_EMAIL_PROTECTION CK_TRUST CKT_NSS_TRUSTED_DELEGATOR CKA_TRUST_CODE_SIGNING CK_TRUST CKT_NSS_MUST_VERIFY_TRUST CKA_TRUST_STEP_UP_APPROVED CK_BBOOL CK_FALSE -# -# Certificate "Security Communication RootCA3" -# -# Issuer: CN=Security Communication RootCA3,O="SECOM Trust Systems CO.,LTD.",C=JP -# Serial Number:00:e1:7c:37:40:fd:1b:fe:67 -# Subject: CN=Security Communication RootCA3,O="SECOM Trust Systems CO.,LTD.",C=JP -# Not Valid Before: Thu Jun 16 06:17:16 2016 -# Not Valid After : Mon Jan 18 06:17:16 2038 -# Fingerprint (SHA-256): 24:A5:5C:2A:B0:51:44:2D:06:17:76:65:41:23:9A:4A:D0:32:D7:C5:51:75:AA:34:FF:DE:2F:BC:4F:5C:52:94 -# Fingerprint (SHA1): C3:03:C8:22:74:92:E5:61:A2:9C:5F:79:91:2B:1E:44:13:91:30:3A -CKA_CLASS CK_OBJECT_CLASS CKO_CERTIFICATE -CKA_TOKEN CK_BBOOL CK_TRUE -CKA_PRIVATE CK_BBOOL CK_FALSE -CKA_MODIFIABLE CK_BBOOL CK_FALSE -CKA_LABEL UTF8 "Security Communication RootCA3" -CKA_CERTIFICATE_TYPE CK_CERTIFICATE_TYPE CKC_X_509 -CKA_SUBJECT MULTILINE_OCTAL -\060\135\061\013\060\011\006\003\125\004\006\023\002\112\120\061 -\045\060\043\006\003\125\004\012\023\034\123\105\103\117\115\040 -\124\162\165\163\164\040\123\171\163\164\145\155\163\040\103\117 -\056\054\114\124\104\056\061\047\060\045\006\003\125\004\003\023 -\036\123\145\143\165\162\151\164\171\040\103\157\155\155\165\156 -\151\143\141\164\151\157\156\040\122\157\157\164\103\101\063 -END -CKA_ID UTF8 "0" -CKA_ISSUER MULTILINE_OCTAL -\060\135\061\013\060\011\006\003\125\004\006\023\002\112\120\061 -\045\060\043\006\003\125\004\012\023\034\123\105\103\117\115\040 -\124\162\165\163\164\040\123\171\163\164\145\155\163\040\103\117 -\056\054\114\124\104\056\061\047\060\045\006\003\125\004\003\023 -\036\123\145\143\165\162\151\164\171\040\103\157\155\155\165\156 -\151\143\141\164\151\157\156\040\122\157\157\164\103\101\063 -END -CKA_SERIAL_NUMBER MULTILINE_OCTAL -\002\011\000\341\174\067\100\375\033\376\147 -END -CKA_VALUE MULTILINE_OCTAL -\060\202\005\177\060\202\003\147\240\003\002\001\002\002\011\000 -\341\174\067\100\375\033\376\147\060\015\006\011\052\206\110\206 -\367\015\001\001\014\005\000\060\135\061\013\060\011\006\003\125 -\004\006\023\002\112\120\061\045\060\043\006\003\125\004\012\023 -\034\123\105\103\117\115\040\124\162\165\163\164\040\123\171\163 -\164\145\155\163\040\103\117\056\054\114\124\104\056\061\047\060 -\045\006\003\125\004\003\023\036\123\145\143\165\162\151\164\171 
-\040\103\157\155\155\165\156\151\143\141\164\151\157\156\040\122 -\157\157\164\103\101\063\060\036\027\015\061\066\060\066\061\066 -\060\066\061\067\061\066\132\027\015\063\070\060\061\061\070\060 -\066\061\067\061\066\132\060\135\061\013\060\011\006\003\125\004 -\006\023\002\112\120\061\045\060\043\006\003\125\004\012\023\034 -\123\105\103\117\115\040\124\162\165\163\164\040\123\171\163\164 -\145\155\163\040\103\117\056\054\114\124\104\056\061\047\060\045 -\006\003\125\004\003\023\036\123\145\143\165\162\151\164\171\040 -\103\157\155\155\165\156\151\143\141\164\151\157\156\040\122\157 -\157\164\103\101\063\060\202\002\042\060\015\006\011\052\206\110 -\206\367\015\001\001\001\005\000\003\202\002\017\000\060\202\002 -\012\002\202\002\001\000\343\311\162\111\367\060\336\011\174\251 -\100\201\130\323\264\072\335\272\141\017\223\120\156\151\074\065 -\302\356\133\163\220\033\147\114\041\354\137\065\273\071\076\053 -\012\140\357\273\155\053\206\373\161\242\310\254\344\126\224\371 -\311\257\261\162\324\040\254\164\322\270\025\255\121\376\205\164 -\241\271\020\376\005\200\371\122\223\263\100\075\165\020\254\300 -\226\267\247\176\166\274\343\033\122\031\316\021\037\013\004\064 -\365\330\365\151\074\167\363\144\364\015\252\205\336\340\011\120 -\004\027\226\204\267\310\212\274\115\162\374\034\273\317\363\006 -\115\371\237\144\367\176\246\146\206\065\161\310\021\200\114\301 -\161\100\130\036\276\240\163\366\374\076\120\341\340\057\046\075 -\176\134\043\265\171\160\336\372\340\321\245\326\014\101\161\173 -\367\352\214\034\210\307\354\213\365\321\057\125\226\106\174\132 -\073\130\073\373\272\330\055\265\045\332\172\116\317\104\256\041 -\246\236\230\312\040\156\174\273\210\205\133\373\300\020\142\273 -\362\371\047\107\357\321\211\071\103\304\337\336\341\101\277\124 -\163\040\227\055\154\332\363\324\007\243\346\271\330\157\256\374 -\214\031\056\323\147\147\053\225\333\130\134\265\152\002\363\270 -\203\136\264\153\276\101\176\127\011\165\104\120\125\315\132\021 -\141\041\012\141\302\251\210\375\023\274\055\211\057\315\141\340 -\225\276\312\265\173\341\173\064\147\013\037\266\014\307\174\036 -\031\123\312\247\261\112\025\040\126\024\160\075\053\202\054\017 -\235\025\035\107\200\107\377\170\231\016\061\257\157\076\217\355 -\206\151\036\173\030\210\024\262\302\374\202\063\056\234\113\055 -\373\160\073\161\252\053\173\046\047\363\032\302\334\373\027\270 -\241\352\313\240\264\256\323\224\176\172\320\253\303\354\070\055 -\021\056\210\277\324\077\255\022\073\102\254\217\002\156\175\314 -\321\137\141\276\241\274\072\152\110\352\046\125\042\026\135\137 -\015\377\047\063\237\030\003\164\212\133\122\040\107\153\105\115 -\042\167\214\125\047\360\257\036\214\311\203\042\124\267\232\320 -\117\331\316\374\331\056\034\226\050\261\002\323\003\275\045\122 -\034\064\146\117\043\253\364\167\202\226\035\321\127\060\010\021 -\005\375\127\321\331\307\002\003\001\000\001\243\102\060\100\060 -\035\006\003\125\035\016\004\026\004\024\144\024\174\374\130\162 -\026\246\012\051\064\025\157\052\313\274\374\257\250\253\060\016 -\006\003\125\035\017\001\001\377\004\004\003\002\001\006\060\017 -\006\003\125\035\023\001\001\377\004\005\060\003\001\001\377\060 -\015\006\011\052\206\110\206\367\015\001\001\014\005\000\003\202 -\002\001\000\334\002\043\010\342\357\041\072\307\015\267\046\322 -\142\223\247\245\043\162\007\040\202\140\337\030\327\124\255\151 -\045\222\236\331\024\317\231\271\122\201\317\256\154\212\073\132 -\071\310\154\001\103\302\042\155\002\360\142\315\116\143\103\300 
-\024\332\364\143\360\352\364\161\356\116\207\343\161\251\364\311 -\127\345\056\137\034\171\273\043\252\207\104\127\351\275\065\115 -\101\273\113\050\243\230\262\033\331\013\027\007\345\367\352\235 -\365\166\327\277\304\266\201\130\377\310\377\144\151\142\171\255 -\156\016\037\177\356\035\151\345\267\162\161\263\376\245\001\065 -\224\124\053\300\122\155\217\125\304\311\322\270\313\312\064\010 -\121\205\240\365\274\264\027\130\352\012\134\172\275\143\306\072 -\057\377\226\111\031\204\352\147\330\004\261\141\364\000\133\112 -\267\234\161\067\031\205\171\277\201\260\307\023\016\166\161\076 -\072\200\006\256\006\026\247\215\265\302\304\313\377\100\245\134 -\215\245\311\072\355\162\201\312\134\230\074\322\064\003\167\010 -\375\360\051\131\135\041\010\307\140\277\244\161\173\270\331\036 -\202\276\011\257\145\157\050\253\277\113\265\356\076\010\107\047 -\240\017\157\017\213\077\254\225\030\363\271\016\334\147\125\156 -\142\236\106\016\321\004\170\312\162\256\166\331\245\370\262\337 -\210\011\141\213\357\044\116\321\131\077\132\324\075\311\223\074 -\053\144\365\201\015\026\226\367\222\303\376\061\157\350\052\062 -\164\016\364\114\230\112\030\016\060\124\325\305\353\274\305\025 -\236\350\231\041\353\047\053\011\012\333\361\346\160\030\126\273 -\014\344\276\371\350\020\244\023\222\270\034\340\333\147\035\123 -\003\244\042\247\334\135\222\020\074\352\377\374\033\020\032\303 -\330\320\234\235\145\313\320\053\047\061\003\036\066\341\075\166 -\165\014\377\105\046\271\335\121\274\043\307\137\330\330\207\020 -\100\022\015\075\070\067\347\104\074\030\300\123\011\144\217\377 -\325\232\246\174\160\056\163\125\041\350\337\377\203\271\035\076 -\062\036\326\246\175\054\361\146\351\134\035\247\243\316\136\045 -\062\053\343\225\254\052\007\316\264\050\170\206\074\055\246\235 -\115\322\164\060\335\144\121\025\333\203\203\121\327\257\375\063 -\235\115\146 -END -CKA_NSS_MOZILLA_CA_POLICY CK_BBOOL CK_TRUE -CKA_NSS_SERVER_DISTRUST_AFTER CK_BBOOL CK_FALSE -CKA_NSS_EMAIL_DISTRUST_AFTER CK_BBOOL CK_FALSE - -# Trust for "Security Communication RootCA3" -# Issuer: CN=Security Communication RootCA3,O="SECOM Trust Systems CO.,LTD.",C=JP -# Serial Number:00:e1:7c:37:40:fd:1b:fe:67 -# Subject: CN=Security Communication RootCA3,O="SECOM Trust Systems CO.,LTD.",C=JP -# Not Valid Before: Thu Jun 16 06:17:16 2016 -# Not Valid After : Mon Jan 18 06:17:16 2038 -# Fingerprint (SHA-256): 24:A5:5C:2A:B0:51:44:2D:06:17:76:65:41:23:9A:4A:D0:32:D7:C5:51:75:AA:34:FF:DE:2F:BC:4F:5C:52:94 -# Fingerprint (SHA1): C3:03:C8:22:74:92:E5:61:A2:9C:5F:79:91:2B:1E:44:13:91:30:3A -CKA_CLASS CK_OBJECT_CLASS CKO_NSS_TRUST -CKA_TOKEN CK_BBOOL CK_TRUE -CKA_PRIVATE CK_BBOOL CK_FALSE -CKA_MODIFIABLE CK_BBOOL CK_FALSE -CKA_LABEL UTF8 "Security Communication RootCA3" -CKA_CERT_SHA1_HASH MULTILINE_OCTAL -\303\003\310\042\164\222\345\141\242\234\137\171\221\053\036\104 -\023\221\060\072 -END -CKA_CERT_MD5_HASH MULTILINE_OCTAL -\034\232\026\377\236\134\340\115\212\024\001\364\065\135\051\046 -END -CKA_ISSUER MULTILINE_OCTAL -\060\135\061\013\060\011\006\003\125\004\006\023\002\112\120\061 -\045\060\043\006\003\125\004\012\023\034\123\105\103\117\115\040 -\124\162\165\163\164\040\123\171\163\164\145\155\163\040\103\117 -\056\054\114\124\104\056\061\047\060\045\006\003\125\004\003\023 -\036\123\145\143\165\162\151\164\171\040\103\157\155\155\165\156 -\151\143\141\164\151\157\156\040\122\157\157\164\103\101\063 -END -CKA_SERIAL_NUMBER MULTILINE_OCTAL -\002\011\000\341\174\067\100\375\033\376\147 -END -CKA_TRUST_SERVER_AUTH CK_TRUST CKT_NSS_TRUSTED_DELEGATOR 
-CKA_TRUST_EMAIL_PROTECTION CK_TRUST CKT_NSS_TRUSTED_DELEGATOR -CKA_TRUST_CODE_SIGNING CK_TRUST CKT_NSS_MUST_VERIFY_TRUST -CKA_TRUST_STEP_UP_APPROVED CK_BBOOL CK_FALSE - # # Certificate "Security Communication ECC RootCA1" # From 47fad8cbc0fce7d5fbf35e0285af455802572c1d Mon Sep 17 00:00:00 2001 From: "Node.js GitHub Bot" Date: Mon, 13 Jan 2025 19:56:11 -0500 Subject: [PATCH 067/158] deps: update simdutf to 6.0.3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PR-URL: https://github.com/nodejs/node/pull/56567 Reviewed-By: Luigi Pinca Reviewed-By: James M Snell Reviewed-By: Ulises Gascón --- deps/simdutf/simdutf.cpp | 495 +++++++++++++++--- deps/simdutf/simdutf.h | 1044 +++++++++++++++++++++++++++++++++++--- 2 files changed, 1403 insertions(+), 136 deletions(-) diff --git a/deps/simdutf/simdutf.cpp b/deps/simdutf/simdutf.cpp index 12a2f494e0a7aa..21962c3bad378d 100644 --- a/deps/simdutf/simdutf.cpp +++ b/deps/simdutf/simdutf.cpp @@ -1,4 +1,4 @@ -/* auto-generated on 2024-12-26 12:42:33 -0500. Do not edit! */ +/* auto-generated on 2025-01-08 17:51:07 -0500. Do not edit! */ /* begin file src/simdutf.cpp */ #include "simdutf.h" // We include base64_tables once. @@ -17142,8 +17142,33 @@ size_t convert_masked_utf8_to_utf16(const char *input, for (int k = 0; k < 6; k++) { utf16_output[k] = buffer[k]; } // the loop might compiler to a couple of instructions. - utf16_output += 6; // We wrote 3 32-bit surrogate pairs. - return 12; // We consumed 12 bytes. + // We need some validation. See + // https://github.com/simdutf/simdutf/pull/631 +#ifdef SIMDUTF_REGULAR_VISUAL_STUDIO + uint8x16_t expected_mask = simdutf_make_uint8x16_t( + 0xf8, 0xc0, 0xc0, 0xc0, 0xf8, 0xc0, 0xc0, 0xc0, 0xf8, 0xc0, 0xc0, + 0xc0, 0x0, 0x0, 0x0, 0x0); +#else + uint8x16_t expected_mask = {0xf8, 0xc0, 0xc0, 0xc0, 0xf8, 0xc0, + 0xc0, 0xc0, 0xf8, 0xc0, 0xc0, 0xc0, + 0x0, 0x0, 0x0, 0x0}; +#endif +#ifdef SIMDUTF_REGULAR_VISUAL_STUDIO + uint8x16_t expected = simdutf_make_uint8x16_t( + 0xf0, 0x80, 0x80, 0x80, 0xf0, 0x80, 0x80, 0x80, 0xf0, 0x80, 0x80, + 0x80, 0x0, 0x0, 0x0, 0x0); +#else + uint8x16_t expected = {0xf0, 0x80, 0x80, 0x80, 0xf0, 0x80, 0x80, 0x80, + 0xf0, 0x80, 0x80, 0x80, 0x0, 0x0, 0x0, 0x0}; +#endif + uint8x16_t check = vceqq_u8(vandq_u8(in, expected_mask), expected); + bool correct = (vminvq_u32(vreinterpretq_u32_u8(check)) == 0xFFFFFFFF); + // The validation is just three instructions and it is not on a critical + // path. + if (correct) { + utf16_output += 6; // We wrote 3 32-bit surrogate pairs. + } + return 12; // We consumed 12 bytes. 
} // 3 1-4 byte sequences uint8x16_t sh = vld1q_u8(reinterpret_cast( @@ -18634,6 +18659,12 @@ compress_decode_base64(char *dst, const char_type *src, size_t srclen, } if (srclen == 0) { if (!ignore_garbage && equalsigns > 0) { + if (last_chunk_options == last_chunk_handling_options::strict) { + return {BASE64_INPUT_REMAINDER, 0, 0}; + } else if (last_chunk_options == + last_chunk_handling_options::stop_before_partial) { + return {SUCCESS, 0, 0}; + } return {INVALID_BASE64_CHARACTER, equallocation, 0}; } return {SUCCESS, 0, 0}; @@ -22881,6 +22912,12 @@ simdutf_warn_unused result implementation::base64_to_binary( } if (length == 0) { if (!ignore_garbage && equalsigns > 0) { + if (last_chunk_options == last_chunk_handling_options::strict) { + return {BASE64_INPUT_REMAINDER, 0}; + } else if (last_chunk_options == + last_chunk_handling_options::stop_before_partial) { + return {SUCCESS, 0}; + } return {INVALID_BASE64_CHARACTER, equallocation}; } return {SUCCESS, 0}; @@ -22926,6 +22963,12 @@ simdutf_warn_unused full_result implementation::base64_to_binary_details( } if (length == 0) { if (!ignore_garbage && equalsigns > 0) { + if (last_chunk_options == last_chunk_handling_options::strict) { + return {BASE64_INPUT_REMAINDER, 0, 0}; + } else if (last_chunk_options == + last_chunk_handling_options::stop_before_partial) { + return {SUCCESS, 0, 0}; + } return {INVALID_BASE64_CHARACTER, equallocation, 0}; } return {SUCCESS, 0, 0}; @@ -22977,6 +23020,12 @@ simdutf_warn_unused result implementation::base64_to_binary( } if (length == 0) { if (!ignore_garbage && equalsigns > 0) { + if (last_chunk_options == last_chunk_handling_options::strict) { + return {BASE64_INPUT_REMAINDER, 0}; + } else if (last_chunk_options == + last_chunk_handling_options::stop_before_partial) { + return {SUCCESS, 0}; + } return {INVALID_BASE64_CHARACTER, equallocation}; } return {SUCCESS, 0}; @@ -23022,6 +23071,12 @@ simdutf_warn_unused full_result implementation::base64_to_binary_details( } if (length == 0) { if (!ignore_garbage && equalsigns > 0) { + if (last_chunk_options == last_chunk_handling_options::strict) { + return {BASE64_INPUT_REMAINDER, 0, 0}; + } else if (last_chunk_options == + last_chunk_handling_options::stop_before_partial) { + return {SUCCESS, 0, 0}; + } return {INVALID_BASE64_CHARACTER, equallocation, 0}; } return {SUCCESS, 0, 0}; @@ -23058,6 +23113,8 @@ size_t implementation::binary_to_base64(const char *input, size_t length, #endif #if SIMDUTF_IMPLEMENTATION_ICELAKE /* begin file src/icelake/implementation.cpp */ +#include +#include /* begin file src/simdutf/icelake/begin.h */ @@ -26106,17 +26163,17 @@ bool validate_ascii(const char *buf, size_t len) { /* begin file src/icelake/icelake_utf32_validation.inl.cpp */ // file included directly -const char32_t *validate_utf32(const char32_t *buf, size_t len) { - if (len < 16) { - return buf; +bool validate_utf32(const char32_t *buf, size_t len) { + if (len == 0) { + return true; } - const char32_t *end = buf + len - 16; + const char32_t *end = buf + len; const __m512i offset = _mm512_set1_epi32((uint32_t)0xffff2000); __m512i currentmax = _mm512_setzero_si512(); __m512i currentoffsetmax = _mm512_setzero_si512(); - while (buf <= end) { + while (buf < end - 16) { __m512i utf32 = _mm512_loadu_si512((const __m512i *)buf); buf += 16; currentoffsetmax = @@ -26124,20 +26181,26 @@ const char32_t *validate_utf32(const char32_t *buf, size_t len) { currentmax = _mm512_max_epu32(utf32, currentmax); } + __m512i utf32 = + _mm512_maskz_loadu_epi32(__mmask16((1 << (end - buf)) - 
1), buf); + currentoffsetmax = + _mm512_max_epu32(_mm512_add_epi32(utf32, offset), currentoffsetmax); + currentmax = _mm512_max_epu32(utf32, currentmax); + const __m512i standardmax = _mm512_set1_epi32((uint32_t)0x10ffff); const __m512i standardoffsetmax = _mm512_set1_epi32((uint32_t)0xfffff7ff); __m512i is_zero = _mm512_xor_si512(_mm512_max_epu32(currentmax, standardmax), standardmax); if (_mm512_test_epi8_mask(is_zero, is_zero) != 0) { - return nullptr; + return false; } is_zero = _mm512_xor_si512( _mm512_max_epu32(currentoffsetmax, standardoffsetmax), standardoffsetmax); if (_mm512_test_epi8_mask(is_zero, is_zero) != 0) { - return nullptr; + return false; } - return buf; + return true; } /* end file src/icelake/icelake_utf32_validation.inl.cpp */ /* begin file src/icelake/icelake_convert_latin1_to_utf8.inl.cpp */ @@ -26556,6 +26619,12 @@ compress_decode_base64(char *dst, const chartype *src, size_t srclen, } if (srclen == 0) { if (!ignore_garbage && equalsigns > 0) { + if (last_chunk_options == last_chunk_handling_options::strict) { + return {BASE64_INPUT_REMAINDER, 0, 0}; + } else if (last_chunk_options == + last_chunk_handling_options::stop_before_partial) { + return {SUCCESS, 0, 0}; + } return {INVALID_BASE64_CHARACTER, equallocation, 0}; } return {SUCCESS, 0, 0}; @@ -26753,24 +26822,76 @@ implementation::detect_encodings(const char *input, size_t length) const noexcept { // If there is a BOM, then we trust it. auto bom_encoding = simdutf::BOM::check_bom(input, length); - // todo: convert to a one-pass algorithm if (bom_encoding != encoding_type::unspecified) { return bom_encoding; } + int out = 0; - if (validate_utf8(input, length)) { + uint32_t utf16_err = (length % 2); + uint32_t utf32_err = (length % 4); + uint32_t ends_with_high = 0; + avx512_utf8_checker checker{}; + const __m512i offset = _mm512_set1_epi32((uint32_t)0xffff2000); + __m512i currentmax = _mm512_setzero_si512(); + __m512i currentoffsetmax = _mm512_setzero_si512(); + const char *ptr = input; + const char *end = ptr + length; + for (; end - ptr >= 64; ptr += 64) { + // utf8 checks + const __m512i data = _mm512_loadu_si512((const __m512i *)ptr); + checker.check_next_input(data); + + // utf16le_checks + __m512i diff = _mm512_sub_epi16(data, _mm512_set1_epi16(uint16_t(0xD800))); + __mmask32 surrogates = + _mm512_cmplt_epu16_mask(diff, _mm512_set1_epi16(uint16_t(0x0800))); + __mmask32 highsurrogates = + _mm512_cmplt_epu16_mask(diff, _mm512_set1_epi16(uint16_t(0x0400))); + __mmask32 lowsurrogates = surrogates ^ highsurrogates; + utf16_err |= (((highsurrogates << 1) | ends_with_high) != lowsurrogates); + ends_with_high = ((highsurrogates & 0x80000000) != 0); + + // utf32le checks + currentoffsetmax = + _mm512_max_epu32(_mm512_add_epi32(data, offset), currentoffsetmax); + currentmax = _mm512_max_epu32(data, currentmax); + } + + // last block with 0 <= len < 64 + __mmask64 read_mask = (__mmask64(1) << (end - ptr)) - 1; + const __m512i data = _mm512_maskz_loadu_epi8(read_mask, (const __m512i *)ptr); + checker.check_next_input(data); + + __m512i diff = _mm512_sub_epi16(data, _mm512_set1_epi16(uint16_t(0xD800))); + __mmask32 surrogates = + _mm512_cmplt_epu16_mask(diff, _mm512_set1_epi16(uint16_t(0x0800))); + __mmask32 highsurrogates = + _mm512_cmplt_epu16_mask(diff, _mm512_set1_epi16(uint16_t(0x0400))); + __mmask32 lowsurrogates = surrogates ^ highsurrogates; + utf16_err |= (((highsurrogates << 1) | ends_with_high) != lowsurrogates); + + currentoffsetmax = + _mm512_max_epu32(_mm512_add_epi32(data, offset), currentoffsetmax); + 
currentmax = _mm512_max_epu32(data, currentmax); + + const __m512i standardmax = _mm512_set1_epi32((uint32_t)0x10ffff); + const __m512i standardoffsetmax = _mm512_set1_epi32((uint32_t)0xfffff7ff); + __m512i is_zero = + _mm512_xor_si512(_mm512_max_epu32(currentmax, standardmax), standardmax); + utf32_err |= (_mm512_test_epi8_mask(is_zero, is_zero) != 0); + is_zero = _mm512_xor_si512( + _mm512_max_epu32(currentoffsetmax, standardoffsetmax), standardoffsetmax); + utf32_err |= (_mm512_test_epi8_mask(is_zero, is_zero) != 0); + checker.check_eof(); + bool is_valid_utf8 = !checker.errors(); + if (is_valid_utf8) { out |= encoding_type::UTF8; } - if ((length % 2) == 0) { - if (validate_utf16le(reinterpret_cast(input), - length / 2)) { - out |= encoding_type::UTF16_LE; - } + if (utf16_err == 0) { + out |= encoding_type::UTF16_LE; } - if ((length % 4) == 0) { - if (validate_utf32(reinterpret_cast(input), length / 4)) { - out |= encoding_type::UTF32_LE; - } + if (utf32_err == 0) { + out |= encoding_type::UTF32_LE; } return out; } @@ -27092,14 +27213,7 @@ simdutf_warn_unused result implementation::validate_utf16be_with_errors( simdutf_warn_unused bool implementation::validate_utf32(const char32_t *buf, size_t len) const noexcept { - const char32_t *tail = icelake::validate_utf32(buf, len); - if (tail) { - return scalar::utf32::validate(tail, len - (tail - buf)); - } else { - // we come here if there was an error, or buf was nullptr which may happen - // for empty input. - return len == 0; - } + return icelake::validate_utf32(buf, len); } simdutf_warn_unused result implementation::validate_utf32_with_errors( @@ -27980,16 +28094,7 @@ implementation::count_utf8(const char *input, size_t length) const noexcept { } } - __m256i first_half = _mm512_extracti64x4_epi64(unrolled_popcount, 0); - __m256i second_half = _mm512_extracti64x4_epi64(unrolled_popcount, 1); - answer -= (size_t)_mm256_extract_epi64(first_half, 0) + - (size_t)_mm256_extract_epi64(first_half, 1) + - (size_t)_mm256_extract_epi64(first_half, 2) + - (size_t)_mm256_extract_epi64(first_half, 3) + - (size_t)_mm256_extract_epi64(second_half, 0) + - (size_t)_mm256_extract_epi64(second_half, 1) + - (size_t)_mm256_extract_epi64(second_half, 2) + - (size_t)_mm256_extract_epi64(second_half, 3); + answer -= _mm512_reduce_add_epi64(unrolled_popcount); return answer + scalar::utf8::count_code_points( reinterpret_cast(str + i), length - i); @@ -28175,16 +28280,7 @@ simdutf_warn_unused size_t implementation::utf8_length_from_latin1( eight_64bits, _mm512_sad_epu8(runner, _mm512_setzero_si512())); } - __m256i first_half = _mm512_extracti64x4_epi64(eight_64bits, 0); - __m256i second_half = _mm512_extracti64x4_epi64(eight_64bits, 1); - answer += (size_t)_mm256_extract_epi64(first_half, 0) + - (size_t)_mm256_extract_epi64(first_half, 1) + - (size_t)_mm256_extract_epi64(first_half, 2) + - (size_t)_mm256_extract_epi64(first_half, 3) + - (size_t)_mm256_extract_epi64(second_half, 0) + - (size_t)_mm256_extract_epi64(second_half, 1) + - (size_t)_mm256_extract_epi64(second_half, 2) + - (size_t)_mm256_extract_epi64(second_half, 3); + answer += _mm512_reduce_add_epi64(eight_64bits); } else if (answer > 0) { for (; i + sizeof(__m512i) <= length; i += sizeof(__m512i)) { __m512i latin = _mm512_loadu_si512((const __m512i *)(str + i)); @@ -31471,6 +31567,12 @@ compress_decode_base64(char *dst, const chartype *src, size_t srclen, } if (srclen == 0) { if (!ignore_garbage && equalsigns > 0) { + if (last_chunk_options == last_chunk_handling_options::strict) { + return 
{BASE64_INPUT_REMAINDER, 0, 0}; + } else if (last_chunk_options == + last_chunk_handling_options::stop_before_partial) { + return {SUCCESS, 0, 0}; + } return {INVALID_BASE64_CHARACTER, equallocation, 0}; } return {SUCCESS, 0, 0}; @@ -33426,20 +33528,103 @@ implementation::detect_encodings(const char *input, if (bom_encoding != encoding_type::unspecified) { return bom_encoding; } + int out = 0; - if (validate_utf8(input, length)) { + uint32_t utf16_err = (length % 2); + uint32_t utf32_err = (length % 4); + uint32_t ends_with_high = 0; + const auto v_d8 = simd8::splat(0xd8); + const auto v_f8 = simd8::splat(0xf8); + const auto v_fc = simd8::splat(0xfc); + const auto v_dc = simd8::splat(0xdc); + const __m256i standardmax = _mm256_set1_epi32(0x10ffff); + const __m256i offset = _mm256_set1_epi32(0xffff2000); + const __m256i standardoffsetmax = _mm256_set1_epi32(0xfffff7ff); + __m256i currentmax = _mm256_setzero_si256(); + __m256i currentoffsetmax = _mm256_setzero_si256(); + + utf8_checker c{}; + buf_block_reader<64> reader(reinterpret_cast(input), length); + while (reader.has_full_block()) { + simd::simd8x64 in(reader.full_block()); + // utf8 checks + c.check_next_input(in); + + // utf16le checks + auto in0 = simd16(in.chunks[0]); + auto in1 = simd16(in.chunks[1]); + const auto t0 = in0.shr<8>(); + const auto t1 = in1.shr<8>(); + const auto in2 = simd16::pack(t0, t1); + const auto surrogates_wordmask = (in2 & v_f8) == v_d8; + const uint32_t surrogates_bitmask = surrogates_wordmask.to_bitmask(); + const auto vL = (in2 & v_fc) == v_dc; + const uint32_t L = vL.to_bitmask(); + const uint32_t H = L ^ surrogates_bitmask; + utf16_err |= (((H << 1) | ends_with_high) != L); + ends_with_high = (H & 0x80000000) != 0; + + // utf32le checks + currentmax = _mm256_max_epu32(in.chunks[0], currentmax); + currentoffsetmax = _mm256_max_epu32(_mm256_add_epi32(in.chunks[0], offset), + currentoffsetmax); + currentmax = _mm256_max_epu32(in.chunks[1], currentmax); + currentoffsetmax = _mm256_max_epu32(_mm256_add_epi32(in.chunks[1], offset), + currentoffsetmax); + + reader.advance(); + } + + uint8_t block[64]{}; + size_t idx = reader.block_index(); + std::memcpy(block, &input[idx], length - idx); + simd::simd8x64 in(block); + c.check_next_input(in); + + // utf16le last block check + auto in0 = simd16(in.chunks[0]); + auto in1 = simd16(in.chunks[1]); + const auto t0 = in0.shr<8>(); + const auto t1 = in1.shr<8>(); + const auto in2 = simd16::pack(t0, t1); + const auto surrogates_wordmask = (in2 & v_f8) == v_d8; + const uint32_t surrogates_bitmask = surrogates_wordmask.to_bitmask(); + const auto vL = (in2 & v_fc) == v_dc; + const uint32_t L = vL.to_bitmask(); + const uint32_t H = L ^ surrogates_bitmask; + utf16_err |= (((H << 1) | ends_with_high) != L); + // this is required to check for last byte ending in high and end of input + // is reached + ends_with_high = (H & 0x80000000) != 0; + utf16_err |= ends_with_high; + + // utf32le last block check + currentmax = _mm256_max_epu32(in.chunks[0], currentmax); + currentoffsetmax = _mm256_max_epu32(_mm256_add_epi32(in.chunks[0], offset), + currentoffsetmax); + currentmax = _mm256_max_epu32(in.chunks[1], currentmax); + currentoffsetmax = _mm256_max_epu32(_mm256_add_epi32(in.chunks[1], offset), + currentoffsetmax); + + reader.advance(); + + c.check_eof(); + bool is_valid_utf8 = !c.errors(); + __m256i is_zero = + _mm256_xor_si256(_mm256_max_epu32(currentmax, standardmax), standardmax); + utf32_err |= (_mm256_testz_si256(is_zero, is_zero) == 0); + + is_zero = _mm256_xor_si256( + 
_mm256_max_epu32(currentoffsetmax, standardoffsetmax), standardoffsetmax); + utf32_err |= (_mm256_testz_si256(is_zero, is_zero) == 0); + if (is_valid_utf8) { out |= encoding_type::UTF8; } - if ((length % 2) == 0) { - if (validate_utf16le(reinterpret_cast(input), - length / 2)) { - out |= encoding_type::UTF16_LE; - } + if (utf16_err == 0) { + out |= encoding_type::UTF16_LE; } - if ((length % 4) == 0) { - if (validate_utf32(reinterpret_cast(input), length / 4)) { - out |= encoding_type::UTF32_LE; - } + if (utf32_err == 0) { + out |= encoding_type::UTF32_LE; } return out; } @@ -36317,6 +36502,12 @@ simdutf_warn_unused result implementation::base64_to_binary( } if (length == 0) { if (!ignore_garbage && equalsigns > 0) { + if (last_chunk_options == last_chunk_handling_options::strict) { + return {BASE64_INPUT_REMAINDER, 0}; + } else if (last_chunk_options == + last_chunk_handling_options::stop_before_partial) { + return {SUCCESS, 0}; + } return {INVALID_BASE64_CHARACTER, equallocation}; } return {SUCCESS, 0}; @@ -36368,6 +36559,12 @@ simdutf_warn_unused result implementation::base64_to_binary( } if (length == 0) { if (!ignore_garbage && equalsigns > 0) { + if (last_chunk_options == last_chunk_handling_options::strict) { + return {BASE64_INPUT_REMAINDER, 0}; + } else if (last_chunk_options == + last_chunk_handling_options::stop_before_partial) { + return {SUCCESS, 0}; + } return {INVALID_BASE64_CHARACTER, equallocation}; } return {SUCCESS, 0}; @@ -38120,6 +38317,12 @@ simdutf_warn_unused result implementation::base64_to_binary( } if (length == 0) { if (!ignore_garbage && equalsigns > 0) { + if (last_chunk_options == last_chunk_handling_options::strict) { + return {BASE64_INPUT_REMAINDER, 0}; + } else if (last_chunk_options == + last_chunk_handling_options::stop_before_partial) { + return {SUCCESS, 0}; + } return {INVALID_BASE64_CHARACTER, equallocation}; } return {SUCCESS, 0}; @@ -38165,6 +38368,12 @@ simdutf_warn_unused full_result implementation::base64_to_binary_details( } if (length == 0) { if (!ignore_garbage && equalsigns > 0) { + if (last_chunk_options == last_chunk_handling_options::strict) { + return {BASE64_INPUT_REMAINDER, 0, 0}; + } else if (last_chunk_options == + last_chunk_handling_options::stop_before_partial) { + return {SUCCESS, 0, 0}; + } return {INVALID_BASE64_CHARACTER, equallocation, 0}; } return {SUCCESS, 0, 0}; @@ -38216,6 +38425,12 @@ simdutf_warn_unused result implementation::base64_to_binary( } if (length == 0) { if (!ignore_garbage && equalsigns > 0) { + if (last_chunk_options == last_chunk_handling_options::strict) { + return {BASE64_INPUT_REMAINDER, 0}; + } else if (last_chunk_options == + last_chunk_handling_options::stop_before_partial) { + return {SUCCESS, 0}; + } return {INVALID_BASE64_CHARACTER, equallocation}; } return {SUCCESS, 0}; @@ -38261,6 +38476,12 @@ simdutf_warn_unused full_result implementation::base64_to_binary_details( } if (length == 0) { if (!ignore_garbage && equalsigns > 0) { + if (last_chunk_options == last_chunk_handling_options::strict) { + return {BASE64_INPUT_REMAINDER, 0, 0}; + } else if (last_chunk_options == + last_chunk_handling_options::stop_before_partial) { + return {SUCCESS, 0, 0}; + } return {INVALID_BASE64_CHARACTER, equallocation, 0}; } return {SUCCESS, 0, 0}; @@ -41328,6 +41549,12 @@ compress_decode_base64(char *dst, const chartype *src, size_t srclen, } if (srclen == 0) { if (!ignore_garbage && equalsigns > 0) { + if (last_chunk_options == last_chunk_handling_options::strict) { + return {BASE64_INPUT_REMAINDER, 0, 0}; + } else 
if (last_chunk_options == + last_chunk_handling_options::stop_before_partial) { + return {SUCCESS, 0, 0}; + } return {INVALID_BASE64_CHARACTER, equallocation, 0}; } return {SUCCESS, 0, 0}; @@ -43282,24 +43509,138 @@ implementation::detect_encodings(const char *input, size_t length) const noexcept { // If there is a BOM, then we trust it. auto bom_encoding = simdutf::BOM::check_bom(input, length); - // todo: reimplement as a one-pass algorithm. if (bom_encoding != encoding_type::unspecified) { return bom_encoding; } + int out = 0; - if (validate_utf8(input, length)) { + uint32_t utf16_err = (length % 2); + uint32_t utf32_err = (length % 4); + uint32_t ends_with_high = 0; + const auto v_d8 = simd8::splat(0xd8); + const auto v_f8 = simd8::splat(0xf8); + const auto v_fc = simd8::splat(0xfc); + const auto v_dc = simd8::splat(0xdc); + const __m128i standardmax = _mm_set1_epi32(0x10ffff); + const __m128i offset = _mm_set1_epi32(0xffff2000); + const __m128i standardoffsetmax = _mm_set1_epi32(0xfffff7ff); + __m128i currentmax = _mm_setzero_si128(); + __m128i currentoffsetmax = _mm_setzero_si128(); + + utf8_checker c{}; + buf_block_reader<64> reader(reinterpret_cast(input), length); + while (reader.has_full_block()) { + simd::simd8x64 in(reader.full_block()); + // utf8 checks + c.check_next_input(in); + + // utf16le checks + auto in0 = simd16(in.chunks[0]); + auto in1 = simd16(in.chunks[1]); + const auto t0 = in0.shr<8>(); + const auto t1 = in1.shr<8>(); + const auto packed1 = simd16::pack(t0, t1); + auto in2 = simd16(in.chunks[2]); + auto in3 = simd16(in.chunks[3]); + const auto t2 = in2.shr<8>(); + const auto t3 = in3.shr<8>(); + const auto packed2 = simd16::pack(t2, t3); + + const auto surrogates_wordmask_lo = (packed1 & v_f8) == v_d8; + const auto surrogates_wordmask_hi = (packed2 & v_f8) == v_d8; + const uint32_t surrogates_bitmask = + (surrogates_wordmask_hi.to_bitmask() << 16) | + surrogates_wordmask_lo.to_bitmask(); + const auto vL_lo = (packed1 & v_fc) == v_dc; + const auto vL_hi = (packed2 & v_fc) == v_dc; + const uint32_t L = (vL_hi.to_bitmask() << 16) | vL_lo.to_bitmask(); + const uint32_t H = L ^ surrogates_bitmask; + utf16_err |= (((H << 1) | ends_with_high) != L); + ends_with_high = (H & 0x80000000) != 0; + + // utf32le checks + currentmax = _mm_max_epu32(in.chunks[0], currentmax); + currentoffsetmax = + _mm_max_epu32(_mm_add_epi32(in.chunks[0], offset), currentoffsetmax); + currentmax = _mm_max_epu32(in.chunks[1], currentmax); + currentoffsetmax = + _mm_max_epu32(_mm_add_epi32(in.chunks[1], offset), currentoffsetmax); + currentmax = _mm_max_epu32(in.chunks[2], currentmax); + currentoffsetmax = + _mm_max_epu32(_mm_add_epi32(in.chunks[2], offset), currentoffsetmax); + currentmax = _mm_max_epu32(in.chunks[3], currentmax); + currentoffsetmax = + _mm_max_epu32(_mm_add_epi32(in.chunks[3], offset), currentoffsetmax); + + reader.advance(); + } + + uint8_t block[64]{}; + size_t idx = reader.block_index(); + std::memcpy(block, &input[idx], length - idx); + simd::simd8x64 in(block); + c.check_next_input(in); + + // utf16le last block check + auto in0 = simd16(in.chunks[0]); + auto in1 = simd16(in.chunks[1]); + const auto t0 = in0.shr<8>(); + const auto t1 = in1.shr<8>(); + const auto packed1 = simd16::pack(t0, t1); + auto in2 = simd16(in.chunks[2]); + auto in3 = simd16(in.chunks[3]); + const auto t2 = in2.shr<8>(); + const auto t3 = in3.shr<8>(); + const auto packed2 = simd16::pack(t2, t3); + + const auto surrogates_wordmask_lo = (packed1 & v_f8) == v_d8; + const auto surrogates_wordmask_hi = 
(packed2 & v_f8) == v_d8; + const uint32_t surrogates_bitmask = + (surrogates_wordmask_hi.to_bitmask() << 16) | + surrogates_wordmask_lo.to_bitmask(); + const auto vL_lo = (packed1 & v_fc) == v_dc; + const auto vL_hi = (packed2 & v_fc) == v_dc; + const uint32_t L = (vL_hi.to_bitmask() << 16) | vL_lo.to_bitmask(); + const uint32_t H = L ^ surrogates_bitmask; + utf16_err |= (((H << 1) | ends_with_high) != L); + // this is required to check for last byte ending in high and end of input + // is reached + ends_with_high = (H & 0x80000000) != 0; + utf16_err |= ends_with_high; + + // utf32le last block check + currentmax = _mm_max_epu32(in.chunks[0], currentmax); + currentoffsetmax = + _mm_max_epu32(_mm_add_epi32(in.chunks[0], offset), currentoffsetmax); + currentmax = _mm_max_epu32(in.chunks[1], currentmax); + currentoffsetmax = + _mm_max_epu32(_mm_add_epi32(in.chunks[1], offset), currentoffsetmax); + currentmax = _mm_max_epu32(in.chunks[2], currentmax); + currentoffsetmax = + _mm_max_epu32(_mm_add_epi32(in.chunks[2], offset), currentoffsetmax); + currentmax = _mm_max_epu32(in.chunks[3], currentmax); + currentoffsetmax = + _mm_max_epu32(_mm_add_epi32(in.chunks[3], offset), currentoffsetmax); + + reader.advance(); + + c.check_eof(); + bool is_valid_utf8 = !c.errors(); + __m128i is_zero = + _mm_xor_si128(_mm_max_epu32(currentmax, standardmax), standardmax); + utf32_err |= (_mm_test_all_zeros(is_zero, is_zero) == 0); + + is_zero = _mm_xor_si128(_mm_max_epu32(currentoffsetmax, standardoffsetmax), + standardoffsetmax); + utf32_err |= (_mm_test_all_zeros(is_zero, is_zero) == 0); + if (is_valid_utf8) { out |= encoding_type::UTF8; } - if ((length % 2) == 0) { - if (validate_utf16le(reinterpret_cast(input), - length / 2)) { - out |= encoding_type::UTF16_LE; - } + if (utf16_err == 0) { + out |= encoding_type::UTF16_LE; } - if ((length % 4) == 0) { - if (validate_utf32(reinterpret_cast(input), length / 4)) { - out |= encoding_type::UTF32_LE; - } + if (utf32_err == 0) { + out |= encoding_type::UTF32_LE; } return out; } @@ -47336,6 +47677,12 @@ compress_decode_base64(char *dst, const char_type *src, size_t srclen, } if (srclen == 0) { if (!ignore_garbage && equalsigns > 0) { + if (last_chunk_options == last_chunk_handling_options::strict) { + return {BASE64_INPUT_REMAINDER, 0, 0}; + } else if (last_chunk_options == + last_chunk_handling_options::stop_before_partial) { + return {SUCCESS, 0, 0}; + } return {INVALID_BASE64_CHARACTER, equallocation, 0}; } return {SUCCESS, 0, 0}; @@ -53668,6 +54015,12 @@ compress_decode_base64(char *dst, const chartype *src, size_t srclen, } if (srclen == 0) { if (!ignore_garbage && equalsigns > 0) { + if (last_chunk_options == last_chunk_handling_options::strict) { + return {BASE64_INPUT_REMAINDER, 0, 0}; + } else if (last_chunk_options == + last_chunk_handling_options::stop_before_partial) { + return {SUCCESS, 0, 0}; + } return {INVALID_BASE64_CHARACTER, equallocation, 0}; } return {SUCCESS, 0, 0}; diff --git a/deps/simdutf/simdutf.h b/deps/simdutf/simdutf.h index 9a4b4580da91a1..4bec0cf300292a 100644 --- a/deps/simdutf/simdutf.h +++ b/deps/simdutf/simdutf.h @@ -1,4 +1,4 @@ -/* auto-generated on 2024-12-26 12:42:33 -0500. Do not edit! */ +/* auto-generated on 2025-01-08 17:51:07 -0500. Do not edit! 
*/ /* begin file include/simdutf.h */ #ifndef SIMDUTF_H #define SIMDUTF_H @@ -55,21 +55,35 @@ #ifndef SIMDUTF_COMMON_DEFS_H #define SIMDUTF_COMMON_DEFS_H -#include /* begin file include/simdutf/portability.h */ #ifndef SIMDUTF_PORTABILITY_H #define SIMDUTF_PORTABILITY_H + +#include #include #include #include -#include -#include #ifndef _WIN32 // strcasecmp, strncasecmp #include #endif +#if defined(__apple_build_version__) + #if __apple_build_version__ < 14000000 + #define SIMDUTF_SPAN_DISABLED \ + 1 // apple-clang/13 doesn't support std::convertible_to + #endif +#endif + +#if SIMDUTF_CPLUSPLUS20 + #include + #if __cpp_concepts >= 201907L && __cpp_lib_span >= 202002L && \ + !defined(SIMDUTF_SPAN_DISABLED) + #define SIMDUTF_SPAN 1 + #endif +#endif + /** * We want to check that it is actually a little endian system at * compile-time. @@ -291,27 +305,6 @@ #define simdutf_strncasecmp strncasecmp #endif -#ifdef NDEBUG - - #ifdef SIMDUTF_VISUAL_STUDIO - #define SIMDUTF_UNREACHABLE() __assume(0) - #define SIMDUTF_ASSUME(COND) __assume(COND) - #else - #define SIMDUTF_UNREACHABLE() __builtin_unreachable(); - #define SIMDUTF_ASSUME(COND) \ - do { \ - if (!(COND)) \ - __builtin_unreachable(); \ - } while (0) - #endif - -#else // NDEBUG - - #define SIMDUTF_UNREACHABLE() assert(0); - #define SIMDUTF_ASSUME(COND) assert(COND) - -#endif - #if defined(__GNUC__) && !defined(__clang__) #if __GNUC__ >= 11 #define SIMDUTF_GCC11ORMORE 1 @@ -402,27 +395,6 @@ #endif // SIMDUTF_AVX512_H_ /* end file include/simdutf/avx512.h */ -#if defined(__GNUC__) - // Marks a block with a name so that MCA analysis can see it. - #define SIMDUTF_BEGIN_DEBUG_BLOCK(name) \ - __asm volatile("# LLVM-MCA-BEGIN " #name); - #define SIMDUTF_END_DEBUG_BLOCK(name) __asm volatile("# LLVM-MCA-END " #name); - #define SIMDUTF_DEBUG_BLOCK(name, block) \ - BEGIN_DEBUG_BLOCK(name); \ - block; \ - END_DEBUG_BLOCK(name); -#else - #define SIMDUTF_BEGIN_DEBUG_BLOCK(name) - #define SIMDUTF_END_DEBUG_BLOCK(name) - #define SIMDUTF_DEBUG_BLOCK(name, block) -#endif - -// Align to N-byte boundary -#define SIMDUTF_ROUNDUP_N(a, n) (((a) + ((n) - 1)) & ~((n) - 1)) -#define SIMDUTF_ROUNDDOWN_N(a, n) ((a) & ~((n) - 1)) - -#define SIMDUTF_ISALIGNED_N(ptr, n) (((uintptr_t)(ptr) & ((n) - 1)) == 0) - #if defined(SIMDUTF_REGULAR_VISUAL_STUDIO) #define SIMDUTF_DEPRECATED __declspec(deprecated) @@ -536,18 +508,11 @@ #endif #endif -/// If EXPR is an error, returns it. -#define SIMDUTF_TRY(EXPR) \ - { \ - auto _err = (EXPR); \ - if (_err) { \ - return _err; \ - } \ - } - #endif // SIMDUTF_COMMON_DEFS_H /* end file include/simdutf/common_defs.h */ /* begin file include/simdutf/encoding_types.h */ +#ifndef SIMDUTF_ENCODING_TYPES_H +#define SIMDUTF_ENCODING_TYPES_H #include namespace simdutf { @@ -591,6 +556,7 @@ size_t bom_byte_size(encoding_type bom); } // namespace BOM } // namespace simdutf +#endif /* end file include/simdutf/encoding_types.h */ /* begin file include/simdutf/error.h */ #ifndef SIMDUTF_ERROR_H @@ -675,22 +641,22 @@ SIMDUTF_DISABLE_UNDESIRED_WARNINGS #define SIMDUTF_SIMDUTF_VERSION_H /** The version of simdutf being used (major.minor.revision) */ -#define SIMDUTF_VERSION "5.7.2" +#define SIMDUTF_VERSION "6.0.3" namespace simdutf { enum { /** * The major version (MAJOR.minor.revision) of simdutf being used. */ - SIMDUTF_VERSION_MAJOR = 5, + SIMDUTF_VERSION_MAJOR = 6, /** * The minor version (major.MINOR.revision) of simdutf being used. 
*/ - SIMDUTF_VERSION_MINOR = 7, + SIMDUTF_VERSION_MINOR = 0, /** * The revision (major.minor.REVISION) of simdutf being used. */ - SIMDUTF_VERSION_REVISION = 2 + SIMDUTF_VERSION_REVISION = 3 }; } // namespace simdutf @@ -699,11 +665,10 @@ enum { /* begin file include/simdutf/implementation.h */ #ifndef SIMDUTF_IMPLEMENTATION_H #define SIMDUTF_IMPLEMENTATION_H -#include #if !defined(SIMDUTF_NO_THREADS) #include #endif -#include +#include #include /* begin file include/simdutf/internal/isadetection.h */ /* From @@ -1031,8 +996,61 @@ static inline uint32_t detect_supported_architectures() { #endif // SIMDutf_INTERNAL_ISADETECTION_H /* end file include/simdutf/internal/isadetection.h */ +#if SIMDUTF_SPAN + #include + #include + #include +#endif + namespace simdutf { +#if SIMDUTF_SPAN +/// helpers placed in namespace detail are not a part of the public API +namespace detail { +/** + * matches a byte, in the many ways C++ allows. note that these + * are all distinct types. + */ +template +concept byte_like = std::is_same_v || // + std::is_same_v || // + std::is_same_v || // + std::is_same_v; + +template +concept is_byte_like = byte_like>; + +template +concept is_pointer = std::is_pointer_v; + +/** + * matches anything that behaves like std::span and points to character-like + * data such as: std::byte, char, unsigned char, signed char, std::int8_t, + * std::uint8_t + */ +template +concept input_span_of_byte_like = requires(const T &t) { + { t.size() } noexcept -> std::convertible_to; + { t.data() } noexcept -> is_pointer; + { *t.data() } noexcept -> is_byte_like; +}; + +template +concept is_mutable = !std::is_const_v>; + +/** + * like span_of_byte_like, but for an output span (intended to be written to) + */ +template +concept output_span_of_byte_like = requires(T &t) { + { t.size() } noexcept -> std::convertible_to; + { t.data() } noexcept -> is_pointer; + { *t.data() } noexcept -> is_byte_like; + { *t.data() } noexcept -> is_mutable; +}; +} // namespace detail +#endif + /** * Autodetect the encoding of the input, a single encoding is recommended. * E.g., the function might return simdutf::encoding_type::UTF8, @@ -1049,6 +1067,25 @@ simdutf_really_inline simdutf_warn_unused simdutf::encoding_type autodetect_encoding(const uint8_t *input, size_t length) noexcept { return autodetect_encoding(reinterpret_cast(input), length); } +#if SIMDUTF_SPAN +/** + * Autodetect the encoding of the input, a single encoding is recommended. + * E.g., the function might return simdutf::encoding_type::UTF8, + * simdutf::encoding_type::UTF16_LE, simdutf::encoding_type::UTF16_BE, or + * simdutf::encoding_type::UTF32_LE. + * + * @param input the string to analyze. can be a anything span-like that has a + * data() and size() that points to character data: std::string, + * std::string_view, std::vector, std::span etc. + * @return the detected encoding type + */ +simdutf_really_inline simdutf_warn_unused simdutf::encoding_type +autodetect_encoding( + const detail::input_span_of_byte_like auto &input) noexcept { + return autodetect_encoding(reinterpret_cast(input.data()), + input.size()); +} +#endif /** * Autodetect the possible encodings of the input in one pass. 
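For readers following the new SIMDUTF_SPAN surface introduced in the hunks above, here is a minimal usage sketch. It is not part of the upstream header; the helper name is invented, and it only assumes the overloads shown in this diff (the pointer/length `autodetect_encoding` and its span-like overload) plus `simdutf::encoding_type::UTF8`:

#include <string_view>
#include "simdutf.h"

// Hypothetical helper: any byte-like container exposing data()/size(),
// such as std::string_view, can be passed straight to the span overload.
bool sketch_is_probably_utf8(std::string_view text) {
#if SIMDUTF_SPAN
  return simdutf::autodetect_encoding(text) == simdutf::encoding_type::UTF8;
#else
  // Fallback to the pointer/length overload when span support is disabled.
  return simdutf::autodetect_encoding(text.data(), text.size()) ==
         simdutf::encoding_type::UTF8;
#endif
}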
@@ -1067,6 +1104,13 @@ simdutf_really_inline simdutf_warn_unused int detect_encodings(const uint8_t *input, size_t length) noexcept { return detect_encodings(reinterpret_cast(input), length); } +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused int +detect_encodings(const detail::input_span_of_byte_like auto &input) noexcept { + return detect_encodings(reinterpret_cast(input.data()), + input.size()); +} +#endif /** * Validate the UTF-8 string. This function may be best when you expect @@ -1080,6 +1124,13 @@ detect_encodings(const uint8_t *input, size_t length) noexcept { * @return true if and only if the string is valid UTF-8. */ simdutf_warn_unused bool validate_utf8(const char *buf, size_t len) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused bool +validate_utf8(const detail::input_span_of_byte_like auto &input) noexcept { + return validate_utf8(reinterpret_cast(input.data()), + input.size()); +} +#endif /** * Validate the UTF-8 string and stop on error. @@ -1095,6 +1146,13 @@ simdutf_warn_unused bool validate_utf8(const char *buf, size_t len) noexcept; */ simdutf_warn_unused result validate_utf8_with_errors(const char *buf, size_t len) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result validate_utf8_with_errors( + const detail::input_span_of_byte_like auto &input) noexcept { + return validate_utf8_with_errors(reinterpret_cast(input.data()), + input.size()); +} +#endif /** * Validate the ASCII string. @@ -1106,6 +1164,13 @@ simdutf_warn_unused result validate_utf8_with_errors(const char *buf, * @return true if and only if the string is valid ASCII. */ simdutf_warn_unused bool validate_ascii(const char *buf, size_t len) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused bool +validate_ascii(const detail::input_span_of_byte_like auto &input) noexcept { + return validate_ascii(reinterpret_cast(input.data()), + input.size()); +} +#endif /** * Validate the ASCII string and stop on error. It might be faster than @@ -1122,6 +1187,13 @@ simdutf_warn_unused bool validate_ascii(const char *buf, size_t len) noexcept; */ simdutf_warn_unused result validate_ascii_with_errors(const char *buf, size_t len) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result validate_ascii_with_errors( + const detail::input_span_of_byte_like auto &input) noexcept { + return validate_ascii_with_errors( + reinterpret_cast(input.data()), input.size()); +} +#endif /** * Using native endianness; Validate the UTF-16 string. @@ -1139,6 +1211,12 @@ simdutf_warn_unused result validate_ascii_with_errors(const char *buf, */ simdutf_warn_unused bool validate_utf16(const char16_t *buf, size_t len) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused bool +validate_utf16(std::span input) noexcept { + return validate_utf16(input.data(), input.size()); +} +#endif /** * Validate the UTF-16LE string. This function may be best when you expect @@ -1156,6 +1234,12 @@ simdutf_warn_unused bool validate_utf16(const char16_t *buf, */ simdutf_warn_unused bool validate_utf16le(const char16_t *buf, size_t len) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused bool +validate_utf16le(std::span input) noexcept { + return validate_utf16le(input.data(), input.size()); +} +#endif /** * Validate the UTF-16BE string. 
This function may be best when you expect @@ -1173,6 +1257,12 @@ simdutf_warn_unused bool validate_utf16le(const char16_t *buf, */ simdutf_warn_unused bool validate_utf16be(const char16_t *buf, size_t len) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused bool +validate_utf16be(std::span input) noexcept { + return validate_utf16be(input.data(), input.size()); +} +#endif /** * Using native endianness; Validate the UTF-16 string and stop on error. @@ -1193,6 +1283,12 @@ simdutf_warn_unused bool validate_utf16be(const char16_t *buf, */ simdutf_warn_unused result validate_utf16_with_errors(const char16_t *buf, size_t len) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result +validate_utf16_with_errors(std::span input) noexcept { + return validate_utf16_with_errors(input.data(), input.size()); +} +#endif /** * Validate the UTF-16LE string and stop on error. It might be faster than @@ -1212,6 +1308,12 @@ simdutf_warn_unused result validate_utf16_with_errors(const char16_t *buf, */ simdutf_warn_unused result validate_utf16le_with_errors(const char16_t *buf, size_t len) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result +validate_utf16le_with_errors(std::span input) noexcept { + return validate_utf16le_with_errors(input.data(), input.size()); +} +#endif /** * Validate the UTF-16BE string and stop on error. It might be faster than @@ -1231,6 +1333,12 @@ simdutf_warn_unused result validate_utf16le_with_errors(const char16_t *buf, */ simdutf_warn_unused result validate_utf16be_with_errors(const char16_t *buf, size_t len) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result +validate_utf16be_with_errors(std::span input) noexcept { + return validate_utf16be_with_errors(input.data(), input.size()); +} +#endif /** * Validate the UTF-32 string. This function may be best when you expect @@ -1248,6 +1356,12 @@ simdutf_warn_unused result validate_utf16be_with_errors(const char16_t *buf, */ simdutf_warn_unused bool validate_utf32(const char32_t *buf, size_t len) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused bool +validate_utf32(std::span input) noexcept { + return validate_utf32(input.data(), input.size()); +} +#endif /** * Validate the UTF-32 string and stop on error. It might be faster than @@ -1267,6 +1381,12 @@ simdutf_warn_unused bool validate_utf32(const char32_t *buf, */ simdutf_warn_unused result validate_utf32_with_errors(const char32_t *buf, size_t len) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result +validate_utf32_with_errors(std::span input) noexcept { + return validate_utf32_with_errors(input.data(), input.size()); +} +#endif /** * Convert Latin1 string into UTF8 string. @@ -1281,6 +1401,15 @@ simdutf_warn_unused result validate_utf32_with_errors(const char32_t *buf, simdutf_warn_unused size_t convert_latin1_to_utf8(const char *input, size_t length, char *utf8_output) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t convert_latin1_to_utf8( + const detail::input_span_of_byte_like auto &latin1_input, + detail::output_span_of_byte_like auto &&utf8_output) noexcept { + return convert_latin1_to_utf8( + reinterpret_cast(latin1_input.data()), latin1_input.size(), + utf8_output.data()); +} +#endif /** * Convert Latin1 string into UTF8 string with output limit. 
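As a quick illustration of the new std::span overloads for the validators (illustrative only, helper name invented): they simply forward to the existing pointer/length entry points, so an explicit span over existing storage is enough.

#include <span>
#include <string>
#include "simdutf.h"

// Hypothetical helper: equivalent to simdutf::validate_utf16(s.data(), s.size()).
bool sketch_validate_utf16(const std::u16string& s) {
  return simdutf::validate_utf16(
      std::span<const char16_t>(s.data(), s.size()));
}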
@@ -1296,6 +1425,21 @@ simdutf_warn_unused size_t convert_latin1_to_utf8(const char *input, simdutf_warn_unused size_t convert_latin1_to_utf8_safe(const char *input, size_t length, char *utf8_output, size_t utf8_len) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t convert_latin1_to_utf8_safe( + const detail::input_span_of_byte_like auto &input, + detail::output_span_of_byte_like auto &&utf8_output) noexcept { + // implementation note: outputspan is a forwarding ref to avoid copying and + // allow both lvalues and rvalues. std::span can be copied without problems, + // but std::vector should not, and this function should accept both. it will + // allow using an owning rvalue ref (example: passing a temporary std::string) + // as output, but the user will quickly find out that he has no way of getting + // the data out of the object in that case. + return convert_latin1_to_utf8_safe( + input.data(), input.size(), reinterpret_cast(utf8_output.data()), + utf8_output.size()); +} +#endif /** * Convert possibly Latin1 string into UTF-16LE string. @@ -1309,6 +1453,15 @@ convert_latin1_to_utf8_safe(const char *input, size_t length, char *utf8_output, */ simdutf_warn_unused size_t convert_latin1_to_utf16le( const char *input, size_t length, char16_t *utf16_output) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t convert_latin1_to_utf16le( + const detail::input_span_of_byte_like auto &latin1_input, + std::span utf16_output) noexcept { + return convert_latin1_to_utf16le( + reinterpret_cast(latin1_input.data()), latin1_input.size(), + utf16_output.data()); +} +#endif /** * Convert Latin1 string into UTF-16BE string. @@ -1322,6 +1475,14 @@ simdutf_warn_unused size_t convert_latin1_to_utf16le( */ simdutf_warn_unused size_t convert_latin1_to_utf16be( const char *input, size_t length, char16_t *utf16_output) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +convert_latin1_to_utf16be(const detail::input_span_of_byte_like auto &input, + std::span output) noexcept { + return convert_latin1_to_utf16be(reinterpret_cast(input.data()), + input.size(), output.data()); +} +#endif /** * Convert Latin1 string into UTF-32 string. @@ -1335,6 +1496,15 @@ simdutf_warn_unused size_t convert_latin1_to_utf16be( */ simdutf_warn_unused size_t convert_latin1_to_utf32( const char *input, size_t length, char32_t *utf32_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t convert_latin1_to_utf32( + const detail::input_span_of_byte_like auto &latin1_input, + std::span utf32_output) noexcept { + return convert_latin1_to_utf32( + reinterpret_cast(latin1_input.data()), latin1_input.size(), + utf32_output.data()); +} +#endif /** * Convert possibly broken UTF-8 string into latin1 string. 
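To make the sizing convention around the Latin-1 converters concrete, a hedged sketch (helper name invented; `utf8_length_from_latin1` is declared further down in this header): size the output first, convert, then shrink to the number of bytes actually written.

#include <string>
#include "simdutf.h"

// Hypothetical helper showing the size-then-convert pattern for Latin-1 input.
std::string sketch_latin1_to_utf8(const std::string& latin1) {
  std::string utf8(
      simdutf::utf8_length_from_latin1(latin1.data(), latin1.size()), '\0');
  const size_t written =
      simdutf::convert_latin1_to_utf8(latin1.data(), latin1.size(), utf8.data());
  utf8.resize(written);  // trim to the bytes actually produced
  return utf8;
}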
@@ -1351,6 +1521,15 @@ simdutf_warn_unused size_t convert_latin1_to_utf32( simdutf_warn_unused size_t convert_utf8_to_latin1(const char *input, size_t length, char *latin1_output) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t convert_utf8_to_latin1( + const detail::input_span_of_byte_like auto &input, + detail::output_span_of_byte_like auto &&output) noexcept { + return convert_utf8_to_latin1(reinterpret_cast(input.data()), + input.size(), + reinterpret_cast(output.data())); +} +#endif /** * Using native endianness, convert possibly broken UTF-8 string into a UTF-16 @@ -1367,6 +1546,14 @@ simdutf_warn_unused size_t convert_utf8_to_latin1(const char *input, */ simdutf_warn_unused size_t convert_utf8_to_utf16( const char *input, size_t length, char16_t *utf16_output) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +convert_utf8_to_utf16(const detail::input_span_of_byte_like auto &input, + std::span output) noexcept { + return convert_utf8_to_utf16(reinterpret_cast(input.data()), + input.size(), output.data()); +} +#endif /** * Using native endianness, convert a Latin1 string into a UTF-16 string. @@ -1378,6 +1565,14 @@ simdutf_warn_unused size_t convert_utf8_to_utf16( */ simdutf_warn_unused size_t convert_latin1_to_utf16( const char *input, size_t length, char16_t *utf16_output) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +convert_latin1_to_utf16(const detail::input_span_of_byte_like auto &input, + std::span output) noexcept { + return convert_latin1_to_utf16(reinterpret_cast(input.data()), + input.size(), output.data()); +} +#endif /** * Convert possibly broken UTF-8 string into UTF-16LE string. @@ -1393,6 +1588,15 @@ simdutf_warn_unused size_t convert_latin1_to_utf16( */ simdutf_warn_unused size_t convert_utf8_to_utf16le( const char *input, size_t length, char16_t *utf16_output) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +convert_utf8_to_utf16le(const detail::input_span_of_byte_like auto &utf8_input, + std::span utf16_output) noexcept { + return convert_utf8_to_utf16le( + reinterpret_cast(utf8_input.data()), utf8_input.size(), + utf16_output.data()); +} +#endif /** * Convert possibly broken UTF-8 string into UTF-16BE string. @@ -1408,6 +1612,15 @@ simdutf_warn_unused size_t convert_utf8_to_utf16le( */ simdutf_warn_unused size_t convert_utf8_to_utf16be( const char *input, size_t length, char16_t *utf16_output) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +convert_utf8_to_utf16be(const detail::input_span_of_byte_like auto &utf8_input, + std::span utf16_output) noexcept { + return convert_utf8_to_utf16be( + reinterpret_cast(utf8_input.data()), utf8_input.size(), + utf16_output.data()); +} +#endif /** * Convert possibly broken UTF-8 string into latin1 string with errors. 
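The same size-then-convert pattern applies to the UTF-8 to UTF-16LE path documented above. A sketch, assuming the input is already valid UTF-8 (helper name invented; `utf16_length_from_utf8` is declared further down in this header; for untrusted input the `_with_errors` variants below are the better fit):

#include <string>
#include "simdutf.h"

// Hypothetical helper: input assumed to be valid UTF-8.
std::u16string sketch_utf8_to_utf16le(const std::string& utf8) {
  std::u16string out(
      simdutf::utf16_length_from_utf8(utf8.data(), utf8.size()), u'\0');
  const size_t units =
      simdutf::convert_utf8_to_utf16le(utf8.data(), utf8.size(), out.data());
  out.resize(units);  // number of char16_t code units written
  return out;
}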
@@ -1427,6 +1640,16 @@ simdutf_warn_unused size_t convert_utf8_to_utf16be( */ simdutf_warn_unused result convert_utf8_to_latin1_with_errors( const char *input, size_t length, char *latin1_output) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result +convert_utf8_to_latin1_with_errors( + const detail::input_span_of_byte_like auto &utf8_input, + detail::output_span_of_byte_like auto &&latin1_output) noexcept { + return convert_utf8_to_latin1_with_errors( + reinterpret_cast(utf8_input.data()), utf8_input.size(), + reinterpret_cast(latin1_output.data())); +} +#endif /** * Using native endianness, convert possibly broken UTF-8 string into UTF-16 @@ -1445,6 +1668,16 @@ simdutf_warn_unused result convert_utf8_to_latin1_with_errors( */ simdutf_warn_unused result convert_utf8_to_utf16_with_errors( const char *input, size_t length, char16_t *utf16_output) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result +convert_utf8_to_utf16_with_errors( + const detail::input_span_of_byte_like auto &utf8_input, + std::span utf16_output) noexcept { + return convert_utf8_to_utf16_with_errors( + reinterpret_cast(utf8_input.data()), utf8_input.size(), + utf16_output.data()); +} +#endif /** * Convert possibly broken UTF-8 string into UTF-16LE string and stop on error. @@ -1462,6 +1695,16 @@ simdutf_warn_unused result convert_utf8_to_utf16_with_errors( */ simdutf_warn_unused result convert_utf8_to_utf16le_with_errors( const char *input, size_t length, char16_t *utf16_output) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result +convert_utf8_to_utf16le_with_errors( + const detail::input_span_of_byte_like auto &utf8_input, + std::span utf16_output) noexcept { + return convert_utf8_to_utf16le_with_errors( + reinterpret_cast(utf8_input.data()), utf8_input.size(), + utf16_output.data()); +} +#endif /** * Convert possibly broken UTF-8 string into UTF-16BE string and stop on error. @@ -1479,6 +1722,16 @@ simdutf_warn_unused result convert_utf8_to_utf16le_with_errors( */ simdutf_warn_unused result convert_utf8_to_utf16be_with_errors( const char *input, size_t length, char16_t *utf16_output) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result +convert_utf8_to_utf16be_with_errors( + const detail::input_span_of_byte_like auto &utf8_input, + std::span utf16_output) noexcept { + return convert_utf8_to_utf16be_with_errors( + reinterpret_cast(utf8_input.data()), utf8_input.size(), + utf16_output.data()); +} +#endif /** * Convert possibly broken UTF-8 string into UTF-32 string. @@ -1494,6 +1747,15 @@ simdutf_warn_unused result convert_utf8_to_utf16be_with_errors( */ simdutf_warn_unused size_t convert_utf8_to_utf32( const char *input, size_t length, char32_t *utf32_output) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +convert_utf8_to_utf32(const detail::input_span_of_byte_like auto &utf8_input, + std::span utf32_output) noexcept { + return convert_utf8_to_utf32( + reinterpret_cast(utf8_input.data()), utf8_input.size(), + utf32_output.data()); +} +#endif /** * Convert possibly broken UTF-8 string into UTF-32 string and stop on error. 
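A sketch of how the `_with_errors` variants are typically consumed (helper name invented; this assumes the usual `simdutf::result` members `error` and `count`, and that `utf32_length_from_utf8`, declared further down, gives a sufficient output size):

#include <string>
#include <vector>
#include "simdutf.h"

// Hypothetical helper: convert and report failure instead of silently
// producing an empty result.
bool sketch_utf8_to_utf32_checked(const std::string& utf8,
                                  std::vector<char32_t>& out) {
  out.resize(simdutf::utf32_length_from_utf8(utf8.data(), utf8.size()));
  const simdutf::result r = simdutf::convert_utf8_to_utf32_with_errors(
      utf8.data(), utf8.size(), out.data());
  if (r.error != simdutf::error_code::SUCCESS) {
    out.clear();  // discard any partial output on error
    return false;
  }
  out.resize(r.count);  // code points written on success
  return true;
}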
@@ -1511,6 +1773,16 @@ simdutf_warn_unused size_t convert_utf8_to_utf32( */ simdutf_warn_unused result convert_utf8_to_utf32_with_errors( const char *input, size_t length, char32_t *utf32_output) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result +convert_utf8_to_utf32_with_errors( + const detail::input_span_of_byte_like auto &utf8_input, + std::span utf32_output) noexcept { + return convert_utf8_to_utf32_with_errors( + reinterpret_cast(utf8_input.data()), utf8_input.size(), + utf32_output.data()); +} +#endif /** * Convert valid UTF-8 string into latin1 string. @@ -1533,6 +1805,15 @@ simdutf_warn_unused result convert_utf8_to_utf32_with_errors( */ simdutf_warn_unused size_t convert_valid_utf8_to_latin1( const char *input, size_t length, char *latin1_output) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t convert_valid_utf8_to_latin1( + const detail::input_span_of_byte_like auto &valid_utf8_input, + detail::output_span_of_byte_like auto &&latin1_output) noexcept { + return convert_valid_utf8_to_latin1( + reinterpret_cast(valid_utf8_input.data()), + valid_utf8_input.size(), latin1_output.data()); +} +#endif /** * Using native endianness, convert valid UTF-8 string into a UTF-16 string. @@ -1546,6 +1827,15 @@ simdutf_warn_unused size_t convert_valid_utf8_to_latin1( */ simdutf_warn_unused size_t convert_valid_utf8_to_utf16( const char *input, size_t length, char16_t *utf16_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t convert_valid_utf8_to_utf16( + const detail::input_span_of_byte_like auto &valid_utf8_input, + std::span utf16_output) noexcept { + return convert_valid_utf8_to_utf16( + reinterpret_cast(valid_utf8_input.data()), + valid_utf8_input.size(), utf16_output.data()); +} +#endif /** * Convert valid UTF-8 string into UTF-16LE string. @@ -1559,6 +1849,15 @@ simdutf_warn_unused size_t convert_valid_utf8_to_utf16( */ simdutf_warn_unused size_t convert_valid_utf8_to_utf16le( const char *input, size_t length, char16_t *utf16_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t convert_valid_utf8_to_utf16le( + const detail::input_span_of_byte_like auto &valid_utf8_input, + std::span utf16_output) noexcept { + return convert_valid_utf8_to_utf16le( + reinterpret_cast(valid_utf8_input.data()), + valid_utf8_input.size(), utf16_output.data()); +} +#endif /** * Convert valid UTF-8 string into UTF-16BE string. @@ -1572,6 +1871,15 @@ simdutf_warn_unused size_t convert_valid_utf8_to_utf16le( */ simdutf_warn_unused size_t convert_valid_utf8_to_utf16be( const char *input, size_t length, char16_t *utf16_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t convert_valid_utf8_to_utf16be( + const detail::input_span_of_byte_like auto &valid_utf8_input, + std::span utf16_output) noexcept { + return convert_valid_utf8_to_utf16be( + reinterpret_cast(valid_utf8_input.data()), + valid_utf8_input.size(), utf16_output.data()); +} +#endif /** * Convert valid UTF-8 string into UTF-32 string. 
@@ -1585,6 +1893,15 @@ simdutf_warn_unused size_t convert_valid_utf8_to_utf16be( */ simdutf_warn_unused size_t convert_valid_utf8_to_utf32( const char *input, size_t length, char32_t *utf32_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t convert_valid_utf8_to_utf32( + const detail::input_span_of_byte_like auto &valid_utf8_input, + std::span utf32_output) noexcept { + return convert_valid_utf8_to_utf32( + reinterpret_cast(valid_utf8_input.data()), + valid_utf8_input.size(), utf32_output.data()); +} +#endif /** * Return the number of bytes that this Latin1 string would require in UTF-8 @@ -1596,6 +1913,13 @@ simdutf_warn_unused size_t convert_valid_utf8_to_utf32( */ simdutf_warn_unused size_t utf8_length_from_latin1(const char *input, size_t length) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t utf8_length_from_latin1( + const detail::input_span_of_byte_like auto &latin1_input) noexcept { + return utf8_length_from_latin1( + reinterpret_cast(latin1_input.data()), latin1_input.size()); +} +#endif /** * Compute the number of bytes that this UTF-8 string would require in Latin1 @@ -1612,6 +1936,14 @@ simdutf_warn_unused size_t utf8_length_from_latin1(const char *input, */ simdutf_warn_unused size_t latin1_length_from_utf8(const char *input, size_t length) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t latin1_length_from_utf8( + const detail::input_span_of_byte_like auto &valid_utf8_input) noexcept { + return latin1_length_from_utf8( + reinterpret_cast(valid_utf8_input.data()), + valid_utf8_input.size()); +} +#endif /** * Compute the number of 2-byte code units that this UTF-8 string would require @@ -1629,6 +1961,14 @@ simdutf_warn_unused size_t latin1_length_from_utf8(const char *input, */ simdutf_warn_unused size_t utf16_length_from_utf8(const char *input, size_t length) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t utf16_length_from_utf8( + const detail::input_span_of_byte_like auto &valid_utf8_input) noexcept { + return utf16_length_from_utf8( + reinterpret_cast(valid_utf8_input.data()), + valid_utf8_input.size()); +} +#endif /** * Compute the number of 4-byte code units that this UTF-8 string would require @@ -1648,6 +1988,14 @@ simdutf_warn_unused size_t utf16_length_from_utf8(const char *input, */ simdutf_warn_unused size_t utf32_length_from_utf8(const char *input, size_t length) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t utf32_length_from_utf8( + const detail::input_span_of_byte_like auto &valid_utf8_input) noexcept { + return utf32_length_from_utf8( + reinterpret_cast(valid_utf8_input.data()), + valid_utf8_input.size()); +} +#endif /** * Using native endianness, convert possibly broken UTF-16 string into UTF-8 @@ -1667,6 +2015,14 @@ simdutf_warn_unused size_t utf32_length_from_utf8(const char *input, simdutf_warn_unused size_t convert_utf16_to_utf8(const char16_t *input, size_t length, char *utf8_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t convert_utf16_to_utf8( + std::span utf16_input, + detail::output_span_of_byte_like auto &&utf8_output) noexcept { + return convert_utf16_to_utf8(utf16_input.data(), utf16_input.size(), + reinterpret_cast(utf8_output.data())); +} +#endif /** * Using native endianness, convert possibly broken UTF-16 string into Latin1 @@ -1685,6 +2041,15 @@ simdutf_warn_unused size_t convert_utf16_to_utf8(const char16_t *input, */ simdutf_warn_unused size_t 
convert_utf16_to_latin1( const char16_t *input, size_t length, char *latin1_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t convert_utf16_to_latin1( + std::span utf16_input, + detail::output_span_of_byte_like auto &&latin1_output) noexcept { + return convert_utf16_to_latin1( + utf16_input.data(), utf16_input.size(), + reinterpret_cast(latin1_output.data())); +} +#endif /** * Convert possibly broken UTF-16LE string into Latin1 string. @@ -1704,6 +2069,15 @@ simdutf_warn_unused size_t convert_utf16_to_latin1( */ simdutf_warn_unused size_t convert_utf16le_to_latin1( const char16_t *input, size_t length, char *latin1_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t convert_utf16le_to_latin1( + std::span utf16_input, + detail::output_span_of_byte_like auto &&latin1_output) noexcept { + return convert_utf16le_to_latin1( + utf16_input.data(), utf16_input.size(), + reinterpret_cast(latin1_output.data())); +} +#endif /** * Convert possibly broken UTF-16BE string into Latin1 string. @@ -1721,6 +2095,15 @@ simdutf_warn_unused size_t convert_utf16le_to_latin1( */ simdutf_warn_unused size_t convert_utf16be_to_latin1( const char16_t *input, size_t length, char *latin1_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t convert_utf16be_to_latin1( + std::span utf16_input, + detail::output_span_of_byte_like auto &&latin1_output) noexcept { + return convert_utf16be_to_latin1( + utf16_input.data(), utf16_input.size(), + reinterpret_cast(latin1_output.data())); +} +#endif /** * Convert possibly broken UTF-16LE string into UTF-8 string. @@ -1739,6 +2122,14 @@ simdutf_warn_unused size_t convert_utf16be_to_latin1( simdutf_warn_unused size_t convert_utf16le_to_utf8(const char16_t *input, size_t length, char *utf8_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t convert_utf16le_to_utf8( + std::span utf16_input, + detail::output_span_of_byte_like auto &&utf8_output) noexcept { + return convert_utf16le_to_utf8(utf16_input.data(), utf16_input.size(), + reinterpret_cast(utf8_output.data())); +} +#endif /** * Convert possibly broken UTF-16BE string into UTF-8 string. @@ -1757,6 +2148,14 @@ simdutf_warn_unused size_t convert_utf16le_to_utf8(const char16_t *input, simdutf_warn_unused size_t convert_utf16be_to_utf8(const char16_t *input, size_t length, char *utf8_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t convert_utf16be_to_utf8( + std::span utf16_input, + detail::output_span_of_byte_like auto &&utf8_output) noexcept { + return convert_utf16be_to_utf8(utf16_input.data(), utf16_input.size(), + reinterpret_cast(utf8_output.data())); +} +#endif /** * Using native endianness, convert possibly broken UTF-16 string into Latin1 @@ -1776,6 +2175,16 @@ simdutf_warn_unused size_t convert_utf16be_to_utf8(const char16_t *input, */ simdutf_warn_unused result convert_utf16_to_latin1_with_errors( const char16_t *input, size_t length, char *latin1_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result +convert_utf16_to_latin1_with_errors( + std::span utf16_input, + detail::output_span_of_byte_like auto &&latin1_output) noexcept { + return convert_utf16_to_latin1_with_errors( + utf16_input.data(), utf16_input.size(), + reinterpret_cast(latin1_output.data())); +} +#endif /** * Convert possibly broken UTF-16LE string into Latin1 string. 
@@ -1794,6 +2203,16 @@ simdutf_warn_unused result convert_utf16_to_latin1_with_errors( */ simdutf_warn_unused result convert_utf16le_to_latin1_with_errors( const char16_t *input, size_t length, char *latin1_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result +convert_utf16le_to_latin1_with_errors( + std::span utf16_input, + detail::output_span_of_byte_like auto &&latin1_output) noexcept { + return convert_utf16le_to_latin1_with_errors( + utf16_input.data(), utf16_input.size(), + reinterpret_cast(latin1_output.data())); +} +#endif /** * Convert possibly broken UTF-16BE string into Latin1 string. @@ -1814,6 +2233,16 @@ simdutf_warn_unused result convert_utf16le_to_latin1_with_errors( */ simdutf_warn_unused result convert_utf16be_to_latin1_with_errors( const char16_t *input, size_t length, char *latin1_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result +convert_utf16be_to_latin1_with_errors( + std::span utf16_input, + detail::output_span_of_byte_like auto &&latin1_output) noexcept { + return convert_utf16be_to_latin1_with_errors( + utf16_input.data(), utf16_input.size(), + reinterpret_cast(latin1_output.data())); +} +#endif /** * Using native endianness, convert possibly broken UTF-16 string into UTF-8 @@ -1834,6 +2263,16 @@ simdutf_warn_unused result convert_utf16be_to_latin1_with_errors( */ simdutf_warn_unused result convert_utf16_to_utf8_with_errors( const char16_t *input, size_t length, char *utf8_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result +convert_utf16_to_utf8_with_errors( + std::span utf16_input, + detail::output_span_of_byte_like auto &&utf8_output) noexcept { + return convert_utf16_to_utf8_with_errors( + utf16_input.data(), utf16_input.size(), + reinterpret_cast(utf8_output.data())); +} +#endif /** * Convert possibly broken UTF-16LE string into UTF-8 string and stop on error. @@ -1853,6 +2292,16 @@ simdutf_warn_unused result convert_utf16_to_utf8_with_errors( */ simdutf_warn_unused result convert_utf16le_to_utf8_with_errors( const char16_t *input, size_t length, char *utf8_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result +convert_utf16le_to_utf8_with_errors( + std::span utf16_input, + detail::output_span_of_byte_like auto &&utf8_output) noexcept { + return convert_utf16le_to_utf8_with_errors( + utf16_input.data(), utf16_input.size(), + reinterpret_cast(utf8_output.data())); +} +#endif /** * Convert possibly broken UTF-16BE string into UTF-8 string and stop on error. @@ -1872,6 +2321,16 @@ simdutf_warn_unused result convert_utf16le_to_utf8_with_errors( */ simdutf_warn_unused result convert_utf16be_to_utf8_with_errors( const char16_t *input, size_t length, char *utf8_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result +convert_utf16be_to_utf8_with_errors( + std::span utf16_input, + detail::output_span_of_byte_like auto &&utf8_output) noexcept { + return convert_utf16be_to_utf8_with_errors( + utf16_input.data(), utf16_input.size(), + reinterpret_cast(utf8_output.data())); +} +#endif /** * Using native endianness, convert valid UTF-16 string into UTF-8 string. 
@@ -1888,6 +2347,15 @@ simdutf_warn_unused result convert_utf16be_to_utf8_with_errors( */ simdutf_warn_unused size_t convert_valid_utf16_to_utf8( const char16_t *input, size_t length, char *utf8_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t convert_valid_utf16_to_utf8( + std::span valid_utf16_input, + detail::output_span_of_byte_like auto &&utf8_output) noexcept { + return convert_valid_utf16_to_utf8( + valid_utf16_input.data(), valid_utf16_input.size(), + reinterpret_cast(utf8_output.data())); +} +#endif /** * Using native endianness, convert UTF-16 string into Latin1 string. @@ -1910,6 +2378,15 @@ simdutf_warn_unused size_t convert_valid_utf16_to_utf8( */ simdutf_warn_unused size_t convert_valid_utf16_to_latin1( const char16_t *input, size_t length, char *latin1_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t convert_valid_utf16_to_latin1( + std::span valid_utf16_input, + detail::output_span_of_byte_like auto &&latin1_output) noexcept { + return convert_valid_utf16_to_latin1( + valid_utf16_input.data(), valid_utf16_input.size(), + reinterpret_cast(latin1_output.data())); +} +#endif /** * Convert valid UTF-16LE string into Latin1 string. @@ -1932,6 +2409,16 @@ simdutf_warn_unused size_t convert_valid_utf16_to_latin1( */ simdutf_warn_unused size_t convert_valid_utf16le_to_latin1( const char16_t *input, size_t length, char *latin1_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +convert_valid_utf16le_to_latin1( + std::span valid_utf16_input, + detail::output_span_of_byte_like auto &&latin1_output) noexcept { + return convert_valid_utf16le_to_latin1( + valid_utf16_input.data(), valid_utf16_input.size(), + reinterpret_cast(latin1_output.data())); +} +#endif /** * Convert valid UTF-16BE string into Latin1 string. @@ -1954,6 +2441,16 @@ simdutf_warn_unused size_t convert_valid_utf16le_to_latin1( */ simdutf_warn_unused size_t convert_valid_utf16be_to_latin1( const char16_t *input, size_t length, char *latin1_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +convert_valid_utf16be_to_latin1( + std::span valid_utf16_input, + detail::output_span_of_byte_like auto &&latin1_output) noexcept { + return convert_valid_utf16be_to_latin1( + valid_utf16_input.data(), valid_utf16_input.size(), + reinterpret_cast(latin1_output.data())); +} +#endif /** * Convert valid UTF-16LE string into UTF-8 string. @@ -1971,6 +2468,15 @@ simdutf_warn_unused size_t convert_valid_utf16be_to_latin1( */ simdutf_warn_unused size_t convert_valid_utf16le_to_utf8( const char16_t *input, size_t length, char *utf8_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t convert_valid_utf16le_to_utf8( + std::span valid_utf16_input, + detail::output_span_of_byte_like auto &&utf8_output) noexcept { + return convert_valid_utf16le_to_utf8( + valid_utf16_input.data(), valid_utf16_input.size(), + reinterpret_cast(utf8_output.data())); +} +#endif /** * Convert valid UTF-16BE string into UTF-8 string. 
@@ -1987,6 +2493,15 @@ simdutf_warn_unused size_t convert_valid_utf16le_to_utf8( */ simdutf_warn_unused size_t convert_valid_utf16be_to_utf8( const char16_t *input, size_t length, char *utf8_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t convert_valid_utf16be_to_utf8( + std::span valid_utf16_input, + detail::output_span_of_byte_like auto &&utf8_output) noexcept { + return convert_valid_utf16be_to_utf8( + valid_utf16_input.data(), valid_utf16_input.size(), + reinterpret_cast(utf8_output.data())); +} +#endif /** * Using native endianness, convert possibly broken UTF-16 string into UTF-32 @@ -2005,6 +2520,14 @@ simdutf_warn_unused size_t convert_valid_utf16be_to_utf8( */ simdutf_warn_unused size_t convert_utf16_to_utf32( const char16_t *input, size_t length, char32_t *utf32_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +convert_utf16_to_utf32(std::span utf16_input, + std::span utf32_output) noexcept { + return convert_utf16_to_utf32(utf16_input.data(), utf16_input.size(), + utf32_output.data()); +} +#endif /** * Convert possibly broken UTF-16LE string into UTF-32 string. @@ -2022,6 +2545,14 @@ simdutf_warn_unused size_t convert_utf16_to_utf32( */ simdutf_warn_unused size_t convert_utf16le_to_utf32( const char16_t *input, size_t length, char32_t *utf32_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +convert_utf16le_to_utf32(std::span utf16_input, + std::span utf32_output) noexcept { + return convert_utf16le_to_utf32(utf16_input.data(), utf16_input.size(), + utf32_output.data()); +} +#endif /** * Convert possibly broken UTF-16BE string into UTF-32 string. @@ -2039,6 +2570,14 @@ simdutf_warn_unused size_t convert_utf16le_to_utf32( */ simdutf_warn_unused size_t convert_utf16be_to_utf32( const char16_t *input, size_t length, char32_t *utf32_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +convert_utf16be_to_utf32(std::span utf16_input, + std::span utf32_output) noexcept { + return convert_utf16be_to_utf32(utf16_input.data(), utf16_input.size(), + utf32_output.data()); +} +#endif /** * Using native endianness, convert possibly broken UTF-16 string into @@ -2059,6 +2598,14 @@ simdutf_warn_unused size_t convert_utf16be_to_utf32( */ simdutf_warn_unused result convert_utf16_to_utf32_with_errors( const char16_t *input, size_t length, char32_t *utf32_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result +convert_utf16_to_utf32_with_errors(std::span utf16_input, + std::span utf32_output) noexcept { + return convert_utf16_to_utf32_with_errors( + utf16_input.data(), utf16_input.size(), utf32_output.data()); +} +#endif /** * Convert possibly broken UTF-16LE string into UTF-32 string and stop on error. @@ -2078,6 +2625,15 @@ simdutf_warn_unused result convert_utf16_to_utf32_with_errors( */ simdutf_warn_unused result convert_utf16le_to_utf32_with_errors( const char16_t *input, size_t length, char32_t *utf32_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result +convert_utf16le_to_utf32_with_errors( + std::span utf16_input, + std::span utf32_output) noexcept { + return convert_utf16le_to_utf32_with_errors( + utf16_input.data(), utf16_input.size(), utf32_output.data()); +} +#endif /** * Convert possibly broken UTF-16BE string into UTF-32 string and stop on error. 
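For orientation, a minimal usage sketch of the span overloads added above, assuming C++20 with `SIMDUTF_SPAN` enabled and that `std::string` satisfies the byte-like input concept (header and namespace names as in upstream simdutf):

```cpp
// Sketch only: convert possibly broken UTF-8 to UTF-16 via the span overloads.
#include <simdutf.h>

#include <cstdio>
#include <string>
#include <vector>

int main() {
  const std::string utf8 = "caf\xc3\xa9";  // "café" encoded as UTF-8 bytes

  // Size the destination with the span-based length helper, then convert.
  std::vector<char16_t> utf16(simdutf::utf16_length_from_utf8(utf8));
  const size_t written = simdutf::convert_utf8_to_utf16(utf8, utf16);
  if (written == 0) {
    return 1;  // zero signals that the input was not valid UTF-8
  }
  utf16.resize(written);
  std::printf("converted %zu UTF-16 code units\n", written);
  return 0;
}
```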
@@ -2097,6 +2653,15 @@ simdutf_warn_unused result convert_utf16le_to_utf32_with_errors( */ simdutf_warn_unused result convert_utf16be_to_utf32_with_errors( const char16_t *input, size_t length, char32_t *utf32_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result +convert_utf16be_to_utf32_with_errors( + std::span utf16_input, + std::span utf32_output) noexcept { + return convert_utf16be_to_utf32_with_errors( + utf16_input.data(), utf16_input.size(), utf32_output.data()); +} +#endif /** * Using native endianness, convert valid UTF-16 string into UTF-32 string. @@ -2114,6 +2679,14 @@ simdutf_warn_unused result convert_utf16be_to_utf32_with_errors( */ simdutf_warn_unused size_t convert_valid_utf16_to_utf32( const char16_t *input, size_t length, char32_t *utf32_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +convert_valid_utf16_to_utf32(std::span valid_utf16_input, + std::span utf32_output) noexcept { + return convert_valid_utf16_to_utf32( + valid_utf16_input.data(), valid_utf16_input.size(), utf32_output.data()); +} +#endif /** * Convert valid UTF-16LE string into UTF-32 string. @@ -2130,6 +2703,14 @@ simdutf_warn_unused size_t convert_valid_utf16_to_utf32( */ simdutf_warn_unused size_t convert_valid_utf16le_to_utf32( const char16_t *input, size_t length, char32_t *utf32_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +convert_valid_utf16le_to_utf32(std::span valid_utf16_input, + std::span utf32_output) noexcept { + return convert_valid_utf16le_to_utf32( + valid_utf16_input.data(), valid_utf16_input.size(), utf32_output.data()); +} +#endif /** * Convert valid UTF-16BE string into UTF-32 string. @@ -2146,8 +2727,16 @@ simdutf_warn_unused size_t convert_valid_utf16le_to_utf32( */ simdutf_warn_unused size_t convert_valid_utf16be_to_utf32( const char16_t *input, size_t length, char32_t *utf32_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +convert_valid_utf16be_to_utf32(std::span valid_utf16_input, + std::span utf32_output) noexcept { + return convert_valid_utf16be_to_utf32( + valid_utf16_input.data(), valid_utf16_input.size(), utf32_output.data()); +} +#endif -/* +/** * Compute the number of bytes that this UTF-16LE/BE string would require in * Latin1 format. 
* @@ -2174,6 +2763,13 @@ simdutf_warn_unused size_t latin1_length_from_utf16(size_t length) noexcept; */ simdutf_warn_unused size_t utf8_length_from_utf16(const char16_t *input, size_t length) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +utf8_length_from_utf16(std::span valid_utf16_input) noexcept { + return utf8_length_from_utf16(valid_utf16_input.data(), + valid_utf16_input.size()); +} +#endif /** * Compute the number of bytes that this UTF-16LE string would require in UTF-8 @@ -2188,6 +2784,13 @@ simdutf_warn_unused size_t utf8_length_from_utf16(const char16_t *input, */ simdutf_warn_unused size_t utf8_length_from_utf16le(const char16_t *input, size_t length) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +utf8_length_from_utf16le(std::span valid_utf16_input) noexcept { + return utf8_length_from_utf16le(valid_utf16_input.data(), + valid_utf16_input.size()); +} +#endif /** * Compute the number of bytes that this UTF-16BE string would require in UTF-8 @@ -2202,6 +2805,13 @@ simdutf_warn_unused size_t utf8_length_from_utf16le(const char16_t *input, */ simdutf_warn_unused size_t utf8_length_from_utf16be(const char16_t *input, size_t length) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +utf8_length_from_utf16be(std::span valid_utf16_input) noexcept { + return utf8_length_from_utf16be(valid_utf16_input.data(), + valid_utf16_input.size()); +} +#endif /** * Convert possibly broken UTF-32 string into UTF-8 string. @@ -2219,6 +2829,14 @@ simdutf_warn_unused size_t utf8_length_from_utf16be(const char16_t *input, simdutf_warn_unused size_t convert_utf32_to_utf8(const char32_t *input, size_t length, char *utf8_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t convert_utf32_to_utf8( + std::span utf32_input, + detail::output_span_of_byte_like auto &&utf8_output) noexcept { + return convert_utf32_to_utf8(utf32_input.data(), utf32_input.size(), + reinterpret_cast(utf8_output.data())); +} +#endif /** * Convert possibly broken UTF-32 string into UTF-8 string and stop on error. @@ -2238,6 +2856,16 @@ simdutf_warn_unused size_t convert_utf32_to_utf8(const char32_t *input, */ simdutf_warn_unused result convert_utf32_to_utf8_with_errors( const char32_t *input, size_t length, char *utf8_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result +convert_utf32_to_utf8_with_errors( + std::span utf32_input, + detail::output_span_of_byte_like auto &&utf8_output) noexcept { + return convert_utf32_to_utf8_with_errors( + utf32_input.data(), utf32_input.size(), + reinterpret_cast(utf8_output.data())); +} +#endif /** * Convert valid UTF-32 string into UTF-8 string. 
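Similarly, a sketch of the `_with_errors` flavour through the span overloads; the `result` fields (`error`, `count`) and `simdutf::SUCCESS` are assumed from the usual simdutf `result` type rather than shown in this hunk:

```cpp
// Sketch only: UTF-16 to UTF-8 with explicit error reporting.
#include <simdutf.h>

#include <string>

int main() {
  const std::u16string utf16 = u"h\u00e9llo";

  // utf8_length_from_utf16 assumes valid input, so it is used here only to size the buffer.
  std::string utf8(simdutf::utf8_length_from_utf16(utf16), '\0');
  const simdutf::result r = simdutf::convert_utf16_to_utf8_with_errors(utf16, utf8);
  if (r.error != simdutf::SUCCESS) {
    return 1;  // on error, r.count is the position of the offending code unit
  }
  utf8.resize(r.count);  // on success, r.count is the number of bytes written
  return 0;
}
```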
@@ -2254,6 +2882,15 @@ simdutf_warn_unused result convert_utf32_to_utf8_with_errors( */ simdutf_warn_unused size_t convert_valid_utf32_to_utf8( const char32_t *input, size_t length, char *utf8_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t convert_valid_utf32_to_utf8( + std::span valid_utf32_input, + detail::output_span_of_byte_like auto &&utf8_output) noexcept { + return convert_valid_utf32_to_utf8( + valid_utf32_input.data(), valid_utf32_input.size(), + reinterpret_cast(utf8_output.data())); +} +#endif /** * Using native endianness, convert possibly broken UTF-32 string into a UTF-16 @@ -2271,6 +2908,14 @@ simdutf_warn_unused size_t convert_valid_utf32_to_utf8( */ simdutf_warn_unused size_t convert_utf32_to_utf16( const char32_t *input, size_t length, char16_t *utf16_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +convert_utf32_to_utf16(std::span utf32_input, + std::span utf16_output) noexcept { + return convert_utf32_to_utf16(utf32_input.data(), utf32_input.size(), + utf16_output.data()); +} +#endif /** * Convert possibly broken UTF-32 string into UTF-16LE string. @@ -2287,6 +2932,14 @@ simdutf_warn_unused size_t convert_utf32_to_utf16( */ simdutf_warn_unused size_t convert_utf32_to_utf16le( const char32_t *input, size_t length, char16_t *utf16_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +convert_utf32_to_utf16le(std::span utf32_input, + std::span utf16_output) noexcept { + return convert_utf32_to_utf16le(utf32_input.data(), utf32_input.size(), + utf16_output.data()); +} +#endif /** * Convert possibly broken UTF-32 string into Latin1 string. @@ -2304,6 +2957,15 @@ simdutf_warn_unused size_t convert_utf32_to_utf16le( */ simdutf_warn_unused size_t convert_utf32_to_latin1( const char32_t *input, size_t length, char *latin1_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t convert_utf32_to_latin1( + std::span utf32_input, + detail::output_span_of_byte_like auto &&latin1_output) noexcept { + return convert_utf32_to_latin1( + utf32_input.data(), utf32_input.size(), + reinterpret_cast(latin1_output.data())); +} +#endif /** * Convert possibly broken UTF-32 string into Latin1 string and stop on error. @@ -2324,6 +2986,16 @@ simdutf_warn_unused size_t convert_utf32_to_latin1( */ simdutf_warn_unused result convert_utf32_to_latin1_with_errors( const char32_t *input, size_t length, char *latin1_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result +convert_utf32_to_latin1_with_errors( + std::span utf32_input, + detail::output_span_of_byte_like auto &&latin1_output) noexcept { + return convert_utf32_to_latin1_with_errors( + utf32_input.data(), utf32_input.size(), + reinterpret_cast(latin1_output.data())); +} +#endif /** * Convert valid UTF-32 string into Latin1 string. @@ -2347,6 +3019,15 @@ simdutf_warn_unused result convert_utf32_to_latin1_with_errors( */ simdutf_warn_unused size_t convert_valid_utf32_to_latin1( const char32_t *input, size_t length, char *latin1_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t convert_valid_utf32_to_latin1( + std::span valid_utf32_input, + detail::output_span_of_byte_like auto &&latin1_output) noexcept { + return convert_valid_utf32_to_latin1( + valid_utf32_input.data(), valid_utf32_input.size(), + reinterpret_cast(latin1_output.data())); +} +#endif /** * Convert possibly broken UTF-32 string into UTF-16BE string. 
@@ -2363,6 +3044,14 @@ simdutf_warn_unused size_t convert_valid_utf32_to_latin1( */ simdutf_warn_unused size_t convert_utf32_to_utf16be( const char32_t *input, size_t length, char16_t *utf16_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +convert_utf32_to_utf16be(std::span utf32_input, + std::span utf16_output) noexcept { + return convert_utf32_to_utf16be(utf32_input.data(), utf32_input.size(), + utf16_output.data()); +} +#endif /** * Using native endianness, convert possibly broken UTF-32 string into UTF-16 @@ -2383,6 +3072,14 @@ simdutf_warn_unused size_t convert_utf32_to_utf16be( */ simdutf_warn_unused result convert_utf32_to_utf16_with_errors( const char32_t *input, size_t length, char16_t *utf16_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result +convert_utf32_to_utf16_with_errors(std::span utf32_input, + std::span utf16_output) noexcept { + return convert_utf32_to_utf16_with_errors( + utf32_input.data(), utf32_input.size(), utf16_output.data()); +} +#endif /** * Convert possibly broken UTF-32 string into UTF-16LE string and stop on error. @@ -2402,6 +3099,15 @@ simdutf_warn_unused result convert_utf32_to_utf16_with_errors( */ simdutf_warn_unused result convert_utf32_to_utf16le_with_errors( const char32_t *input, size_t length, char16_t *utf16_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result +convert_utf32_to_utf16le_with_errors( + std::span utf32_input, + std::span utf16_output) noexcept { + return convert_utf32_to_utf16le_with_errors( + utf32_input.data(), utf32_input.size(), utf16_output.data()); +} +#endif /** * Convert possibly broken UTF-32 string into UTF-16BE string and stop on error. @@ -2421,6 +3127,15 @@ simdutf_warn_unused result convert_utf32_to_utf16le_with_errors( */ simdutf_warn_unused result convert_utf32_to_utf16be_with_errors( const char32_t *input, size_t length, char16_t *utf16_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result +convert_utf32_to_utf16be_with_errors( + std::span utf32_input, + std::span utf16_output) noexcept { + return convert_utf32_to_utf16be_with_errors( + utf32_input.data(), utf32_input.size(), utf16_output.data()); +} +#endif /** * Using native endianness, convert valid UTF-32 string into a UTF-16 string. @@ -2437,6 +3152,14 @@ simdutf_warn_unused result convert_utf32_to_utf16be_with_errors( */ simdutf_warn_unused size_t convert_valid_utf32_to_utf16( const char32_t *input, size_t length, char16_t *utf16_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +convert_valid_utf32_to_utf16(std::span valid_utf32_input, + std::span utf16_output) noexcept { + return convert_valid_utf32_to_utf16( + valid_utf32_input.data(), valid_utf32_input.size(), utf16_output.data()); +} +#endif /** * Convert valid UTF-32 string into UTF-16LE string. @@ -2453,6 +3176,14 @@ simdutf_warn_unused size_t convert_valid_utf32_to_utf16( */ simdutf_warn_unused size_t convert_valid_utf32_to_utf16le( const char32_t *input, size_t length, char16_t *utf16_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +convert_valid_utf32_to_utf16le(std::span valid_utf32_input, + std::span utf16_output) noexcept { + return convert_valid_utf32_to_utf16le( + valid_utf32_input.data(), valid_utf32_input.size(), utf16_output.data()); +} +#endif /** * Convert valid UTF-32 string into UTF-16BE string. 
@@ -2469,6 +3200,14 @@ simdutf_warn_unused size_t convert_valid_utf32_to_utf16le( */ simdutf_warn_unused size_t convert_valid_utf32_to_utf16be( const char32_t *input, size_t length, char16_t *utf16_buffer) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +convert_valid_utf32_to_utf16be(std::span valid_utf32_input, + std::span utf16_output) noexcept { + return convert_valid_utf32_to_utf16be( + valid_utf32_input.data(), valid_utf32_input.size(), utf16_output.data()); +} +#endif /** * Change the endianness of the input. Can be used to go from UTF-16LE to @@ -2485,6 +3224,14 @@ simdutf_warn_unused size_t convert_valid_utf32_to_utf16be( */ void change_endianness_utf16(const char16_t *input, size_t length, char16_t *output) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline void +change_endianness_utf16(std::span utf16_input, + std::span utf16_output) noexcept { + return change_endianness_utf16(utf16_input.data(), utf16_input.size(), + utf16_output.data()); +} +#endif /** * Compute the number of bytes that this UTF-32 string would require in UTF-8 @@ -2499,6 +3246,13 @@ void change_endianness_utf16(const char16_t *input, size_t length, */ simdutf_warn_unused size_t utf8_length_from_utf32(const char32_t *input, size_t length) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +utf8_length_from_utf32(std::span valid_utf32_input) noexcept { + return utf8_length_from_utf32(valid_utf32_input.data(), + valid_utf32_input.size()); +} +#endif /** * Compute the number of two-byte code units that this UTF-32 string would @@ -2513,6 +3267,13 @@ simdutf_warn_unused size_t utf8_length_from_utf32(const char32_t *input, */ simdutf_warn_unused size_t utf16_length_from_utf32(const char32_t *input, size_t length) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +utf16_length_from_utf32(std::span valid_utf32_input) noexcept { + return utf16_length_from_utf32(valid_utf32_input.data(), + valid_utf32_input.size()); +} +#endif /** * Using native endianness; Compute the number of bytes that this UTF-16 @@ -2531,6 +3292,13 @@ simdutf_warn_unused size_t utf16_length_from_utf32(const char32_t *input, */ simdutf_warn_unused size_t utf32_length_from_utf16(const char16_t *input, size_t length) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +utf32_length_from_utf16(std::span valid_utf16_input) noexcept { + return utf32_length_from_utf16(valid_utf16_input.data(), + valid_utf16_input.size()); +} +#endif /** * Compute the number of bytes that this UTF-16LE string would require in UTF-32 @@ -2549,6 +3317,13 @@ simdutf_warn_unused size_t utf32_length_from_utf16(const char16_t *input, */ simdutf_warn_unused size_t utf32_length_from_utf16le(const char16_t *input, size_t length) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t utf32_length_from_utf16le( + std::span valid_utf16_input) noexcept { + return utf32_length_from_utf16le(valid_utf16_input.data(), + valid_utf16_input.size()); +} +#endif /** * Compute the number of bytes that this UTF-16BE string would require in UTF-32 @@ -2567,6 +3342,13 @@ simdutf_warn_unused size_t utf32_length_from_utf16le(const char16_t *input, */ simdutf_warn_unused size_t utf32_length_from_utf16be(const char16_t *input, size_t length) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t utf32_length_from_utf16be( + std::span valid_utf16_input) noexcept { + return utf32_length_from_utf16be(valid_utf16_input.data(), + 
valid_utf16_input.size()); +} +#endif /** * Count the number of code points (characters) in the string assuming that @@ -2584,6 +3366,12 @@ simdutf_warn_unused size_t utf32_length_from_utf16be(const char16_t *input, */ simdutf_warn_unused size_t count_utf16(const char16_t *input, size_t length) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +count_utf16(std::span valid_utf16_input) noexcept { + return count_utf16(valid_utf16_input.data(), valid_utf16_input.size()); +} +#endif /** * Count the number of code points (characters) in the string assuming that @@ -2601,6 +3389,12 @@ simdutf_warn_unused size_t count_utf16(const char16_t *input, */ simdutf_warn_unused size_t count_utf16le(const char16_t *input, size_t length) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +count_utf16le(std::span valid_utf16_input) noexcept { + return count_utf16le(valid_utf16_input.data(), valid_utf16_input.size()); +} +#endif /** * Count the number of code points (characters) in the string assuming that @@ -2618,6 +3412,12 @@ simdutf_warn_unused size_t count_utf16le(const char16_t *input, */ simdutf_warn_unused size_t count_utf16be(const char16_t *input, size_t length) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +count_utf16be(std::span valid_utf16_input) noexcept { + return count_utf16be(valid_utf16_input.data(), valid_utf16_input.size()); +} +#endif /** * Count the number of code points (characters) in the string assuming that @@ -2633,6 +3433,13 @@ simdutf_warn_unused size_t count_utf16be(const char16_t *input, */ simdutf_warn_unused size_t count_utf8(const char *input, size_t length) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t count_utf8( + const detail::input_span_of_byte_like auto &valid_utf8_input) noexcept { + return count_utf8(reinterpret_cast(valid_utf8_input.data()), + valid_utf8_input.size()); +} +#endif /** * Given a valid UTF-8 string having a possibly truncated last character, @@ -2649,6 +3456,14 @@ simdutf_warn_unused size_t count_utf8(const char *input, * @return the length of the string in bytes, possibly shorter by 1 to 3 bytes */ simdutf_warn_unused size_t trim_partial_utf8(const char *input, size_t length); +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t trim_partial_utf8( + const detail::input_span_of_byte_like auto &valid_utf8_input) noexcept { + return trim_partial_utf8( + reinterpret_cast(valid_utf8_input.data()), + valid_utf8_input.size()); +} +#endif /** * Given a valid UTF-16BE string having a possibly truncated last character, @@ -2666,6 +3481,13 @@ simdutf_warn_unused size_t trim_partial_utf8(const char *input, size_t length); */ simdutf_warn_unused size_t trim_partial_utf16be(const char16_t *input, size_t length); +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +trim_partial_utf16be(std::span valid_utf16_input) noexcept { + return trim_partial_utf16be(valid_utf16_input.data(), + valid_utf16_input.size()); +} +#endif /** * Given a valid UTF-16LE string having a possibly truncated last character, @@ -2683,6 +3505,13 @@ simdutf_warn_unused size_t trim_partial_utf16be(const char16_t *input, */ simdutf_warn_unused size_t trim_partial_utf16le(const char16_t *input, size_t length); +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +trim_partial_utf16le(std::span valid_utf16_input) noexcept { + return trim_partial_utf16le(valid_utf16_input.data(), + valid_utf16_input.size()); +} +#endif /** * Given a valid 
UTF-16 string having a possibly truncated last character, @@ -2700,6 +3529,12 @@ simdutf_warn_unused size_t trim_partial_utf16le(const char16_t *input, */ simdutf_warn_unused size_t trim_partial_utf16(const char16_t *input, size_t length); +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +trim_partial_utf16(std::span valid_utf16_input) noexcept { + return trim_partial_utf16(valid_utf16_input.data(), valid_utf16_input.size()); +} +#endif // base64_options are used to specify the base64 encoding options. // ASCII spaces are ' ', '\t', '\n', '\r', '\f' @@ -2742,6 +3577,14 @@ enum last_chunk_handling_options : uint64_t { */ simdutf_warn_unused size_t maximal_binary_length_from_base64(const char *input, size_t length) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +maximal_binary_length_from_base64( + const detail::input_span_of_byte_like auto &input) noexcept { + return maximal_binary_length_from_base64( + reinterpret_cast(input.data()), input.size()); +} +#endif /** * Provide the maximal binary length in bytes given the base64 input. @@ -2755,6 +3598,12 @@ maximal_binary_length_from_base64(const char *input, size_t length) noexcept; */ simdutf_warn_unused size_t maximal_binary_length_from_base64( const char16_t *input, size_t length) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +maximal_binary_length_from_base64(std::span input) noexcept { + return maximal_binary_length_from_base64(input.data(), input.size()); +} +#endif /** * Convert a base64 input to a binary output. @@ -2814,6 +3663,18 @@ simdutf_warn_unused result base64_to_binary( const char *input, size_t length, char *output, base64_options options = base64_default, last_chunk_handling_options last_chunk_options = loose) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result base64_to_binary( + const detail::input_span_of_byte_like auto &input, + detail::output_span_of_byte_like auto &&binary_output, + base64_options options = base64_default, + last_chunk_handling_options last_chunk_options = loose) noexcept { + return base64_to_binary(reinterpret_cast(input.data()), + input.size(), + reinterpret_cast(binary_output.data()), + options, last_chunk_options); +} +#endif /** * Provide the base64 length in bytes given the length of a binary input. @@ -2847,6 +3708,16 @@ simdutf_warn_unused size_t base64_length_from_binary( */ size_t binary_to_base64(const char *input, size_t length, char *output, base64_options options = base64_default) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused size_t +binary_to_base64(const detail::input_span_of_byte_like auto &input, + detail::output_span_of_byte_like auto &&binary_output, + base64_options options = base64_default) noexcept { + return binary_to_base64( + reinterpret_cast(input.data()), input.size(), + reinterpret_cast(binary_output.data()), options); +} +#endif /** * Convert a base64 input to a binary output. 
@@ -2909,6 +3780,17 @@ base64_to_binary(const char16_t *input, size_t length, char *output, base64_options options = base64_default, last_chunk_handling_options last_chunk_options = last_chunk_handling_options::loose) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result base64_to_binary( + std::span input, + detail::output_span_of_byte_like auto &&binary_output, + base64_options options = base64_default, + last_chunk_handling_options last_chunk_options = loose) noexcept { + return base64_to_binary(input.data(), input.size(), + reinterpret_cast(binary_output.data()), + options, last_chunk_options); +} +#endif /** * Convert a base64 input to a binary output. @@ -2976,11 +3858,43 @@ base64_to_binary_safe(const char *input, size_t length, char *output, size_t &outlen, base64_options options = base64_default, last_chunk_handling_options last_chunk_options = last_chunk_handling_options::loose) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result base64_to_binary_safe( + const detail::input_span_of_byte_like auto &input, + detail::output_span_of_byte_like auto &&binary_output, + base64_options options = base64_default, + last_chunk_handling_options last_chunk_options = loose) noexcept { + // we can't write the outlen to the provided output span, the user will have + // to pick it up from the returned value instead (assuming success). we still + // get the benefit of providing info of how long the output buffer is. + size_t outlen = binary_output.size(); + return base64_to_binary_safe(reinterpret_cast(input.data()), + input.size(), + reinterpret_cast(binary_output.data()), + outlen, options, last_chunk_options); +} +#endif + simdutf_warn_unused result base64_to_binary_safe(const char16_t *input, size_t length, char *output, size_t &outlen, base64_options options = base64_default, last_chunk_handling_options last_chunk_options = last_chunk_handling_options::loose) noexcept; +#if SIMDUTF_SPAN +simdutf_really_inline simdutf_warn_unused result base64_to_binary_safe( + std::span input, + detail::output_span_of_byte_like auto &&binary_output, + base64_options options = base64_default, + last_chunk_handling_options last_chunk_options = loose) noexcept { + // we can't write the outlen to the provided output span, the user will have + // to pick it up from the returned value instead (assuming success). we still + // get the benefit of providing info of how long the output buffer is. + size_t outlen = binary_output.size(); + return base64_to_binary_safe(input.data(), input.size(), + reinterpret_cast(binary_output.data()), + outlen, options, last_chunk_options); +} +#endif /** * An implementation of simdutf for a particular CPU architecture. @@ -4243,7 +5157,7 @@ class implementation { simdutf_warn_unused virtual size_t latin1_length_from_utf8(const char *input, size_t length) const noexcept = 0; - /* + /** * Compute the number of bytes that this UTF-16LE/BE string would require in * Latin1 format. * @@ -4289,7 +5203,7 @@ class implementation { simdutf_warn_unused virtual size_t utf32_length_from_latin1(size_t length) const noexcept = 0; - /* + /** * Compute the number of bytes that this UTF-16LE string would require in * UTF-32 format. * @@ -4310,7 +5224,7 @@ class implementation { utf32_length_from_utf16le(const char16_t *input, size_t length) const noexcept = 0; - /* + /** * Compute the number of bytes that this UTF-16BE string would require in * UTF-32 format. 
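A matching sketch for the base64 span overloads above, under the same assumptions as the earlier sketches (`base64_length_from_binary` takes the binary length, as declared earlier in this header):

```cpp
// Sketch only: round-trip bytes through base64 using the span overloads.
#include <simdutf.h>

#include <string>
#include <vector>

int main() {
  const std::string payload = "raw bytes to encode";

  // Encode: size the output first, then fill it.
  std::string b64(simdutf::base64_length_from_binary(payload.size()), '\0');
  simdutf::binary_to_base64(payload, b64);

  // Decode into a worst-case sized buffer, then trim to the decoded length.
  std::vector<char> decoded(simdutf::maximal_binary_length_from_base64(b64));
  const simdutf::result r = simdutf::base64_to_binary(b64, decoded);
  if (r.error != simdutf::SUCCESS) {
    return 1;  // r.count then reports where decoding stopped in the input
  }
  decoded.resize(r.count);  // on success, r.count is the number of decoded bytes
  return 0;
}
```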
* From f54118c84aa3e3059620a54322644e1c76a42b3e Mon Sep 17 00:00:00 2001 From: Jacob Smith <3012099+JakobJingleheimer@users.noreply.github.com> Date: Tue, 14 Jan 2025 15:24:41 +0100 Subject: [PATCH 068/158] doc: correct customization hook types & clarify descriptions PR-URL: https://github.com/nodejs/node/pull/56454 Reviewed-By: Geoffrey Booth Reviewed-By: James M Snell Reviewed-By: Matteo Collina --- doc/api/module.md | 15 ++++++---- lib/internal/modules/customization_hooks.js | 33 +++++++++++++++++---- 2 files changed, 37 insertions(+), 11 deletions(-) diff --git a/doc/api/module.md b/doc/api/module.md index ace26adc6c6fce..6b9d6cf035575d 100644 --- a/doc/api/module.md +++ b/doc/api/module.md @@ -1039,13 +1039,14 @@ changes: * `nextResolve` {Function} The subsequent `resolve` hook in the chain, or the Node.js default `resolve` hook after the last user-supplied `resolve` hook * `specifier` {string} - * `context` {Object} + * `context` {Object|undefined} When omitted, the defaults are provided. When provided, defaults + are merged in with preference to the provided properties. * Returns: {Object|Promise} The asynchronous version takes either an object containing the following properties, or a `Promise` that will resolve to such an object. The synchronous version only accepts an object returned synchronously. - * `format` {string|null|undefined} A hint to the load hook (it might be - ignored) - `'builtin' | 'commonjs' | 'json' | 'module' | 'wasm'` + * `format` {string|null|undefined} A hint to the `load` hook (it might be ignored). It can be a + module format (such as `'commonjs'` or `'module'`) or an arbitrary value like `'css'` or + `'yaml'`. * `importAttributes` {Object|undefined} The import attributes to use when caching the module (optional; if excluded the input will be used) * `shortCircuit` {undefined|boolean} A signal that this hook intends to @@ -1148,12 +1149,14 @@ changes: * `context` {Object} * `conditions` {string\[]} Export conditions of the relevant `package.json` * `format` {string|null|undefined} The format optionally supplied by the - `resolve` hook chain + `resolve` hook chain. This can be any string value as an input; input values do not need to + conform to the list of acceptable return values described below. * `importAttributes` {Object} * `nextLoad` {Function} The subsequent `load` hook in the chain, or the Node.js default `load` hook after the last user-supplied `load` hook * `url` {string} - * `context` {Object} + * `context` {Object|undefined} When omitted, defaults are provided. When provided, defaults are + merged in with preference to the provided properties. * Returns: {Object|Promise} The asynchronous version takes either an object containing the following properties, or a `Promise` that will resolve to such an object. The synchronous version only accepts an object returned synchronously. 
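To ground the hook shapes described above, a minimal synchronous resolve/load pair is sketched below; the `.yaml` handling and file names are hypothetical, and the hooks are assumed to be registered with `module.registerHooks()` before the application imports any matching file (for example via `node --import ./yaml-hooks.mjs app.mjs`):

```mjs
// yaml-hooks.mjs (hypothetical): pass an arbitrary `format` hint from resolve to load.
import { registerHooks } from 'node:module';
import { readFileSync } from 'node:fs';

registerHooks({
  resolve(specifier, context, nextResolve) {
    const resolved = nextResolve(specifier, context);
    if (specifier.endsWith('.yaml')) {
      // Arbitrary hint; only the load hook below understands it.
      return { ...resolved, format: 'yaml' };
    }
    return resolved;
  },
  load(url, context, nextLoad) {
    if (context.format === 'yaml') {
      const source = readFileSync(new URL(url), 'utf8');
      // A real hook would parse the YAML; here it is simply re-exported as text.
      return {
        format: 'module',
        source: `export default ${JSON.stringify(source)};`,
        shortCircuit: true,
      };
    }
    return nextLoad(url, context);
  },
});
```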
diff --git a/lib/internal/modules/customization_hooks.js b/lib/internal/modules/customization_hooks.js index c7a7a6d53dffd8..9570f52ddc5884 100644 --- a/lib/internal/modules/customization_hooks.js +++ b/lib/internal/modules/customization_hooks.js @@ -25,17 +25,40 @@ let debug = require('internal/util/debuglog').debuglog('module_hooks', (fn) => { debug = fn; }); -/** @typedef {import('internal/modules/cjs/loader.js').Module} Module */ /** - * @typedef {(specifier: string, context: ModuleResolveContext, nextResolve: ResolveHook) - * => ModuleResolveResult} ResolveHook - * @typedef {(url: string, context: ModuleLoadContext, nextLoad: LoadHook) - * => ModuleLoadResult} LoadHook + * @typedef {import('internal/modules/cjs/loader.js').Module} Module + */ +/** + * @typedef {( + * specifier: string, + * context: Partial, + * ) => ModuleResolveResult + * } NextResolve + * @typedef {( + * specifier: string, + * context: ModuleResolveContext, + * nextResolve: NextResolve, + * ) => ModuleResolveResult + * } ResolveHook + * @typedef {( + * url: string, + * context: Partial, + * ) => ModuleLoadResult + * } NextLoad + * @typedef {( + * url: string, + * context: ModuleLoadContext, + * nextLoad: NextLoad, + * ) => ModuleLoadResult + * } LoadHook */ // Use arrays for better insertion and iteration performance, we don't care // about deletion performance as much. + +/** @type {ResolveHook[]} */ const resolveHooks = []; +/** @type {LoadHook[]} */ const loadHooks = []; const hookId = Symbol('kModuleHooksIdKey'); let nextHookId = 0; From cf161237851ee800825c233f8d3f76af6f8b22e3 Mon Sep 17 00:00:00 2001 From: Carlos Espa <43477095+Ceres6@users.noreply.github.com> Date: Tue, 14 Jan 2025 19:24:30 +0100 Subject: [PATCH 069/158] src,worker: add isInternalWorker PR-URL: https://github.com/nodejs/node/pull/56469 Reviewed-By: Jacob Smith Reviewed-By: James M Snell Reviewed-By: Bryan English --- doc/api/worker_threads.md | 38 ++++++++++++++++++++++ lib/internal/worker.js | 2 ++ lib/worker_threads.js | 2 ++ src/node_worker.cc | 19 +++++++++-- src/node_worker.h | 5 ++- test/fixtures/loader-is-internal-thread.js | 3 ++ test/fixtures/worker-is-internal-thread.js | 3 ++ test/parallel/test-is-internal-thread.mjs | 36 ++++++++++++++++++++ 8 files changed, 104 insertions(+), 4 deletions(-) create mode 100644 test/fixtures/loader-is-internal-thread.js create mode 100644 test/fixtures/worker-is-internal-thread.js create mode 100644 test/parallel/test-is-internal-thread.mjs diff --git a/doc/api/worker_threads.md b/doc/api/worker_threads.md index afecd991da9dd3..f2f650bc596e36 100644 --- a/doc/api/worker_threads.md +++ b/doc/api/worker_threads.md @@ -100,6 +100,44 @@ if (isMainThread) { } ``` +## `worker.isInternalThread` + + + +* {boolean} + +Is `true` if this code is running inside of an internal [`Worker`][] thread (e.g the loader thread). 
+ +```bash +node --experimental-loader ./loader.js main.js +``` + +```cjs +// loader.js +const { isInternalThread } = require('node:worker_threads'); +console.log(isInternalThread); // true +``` + +```mjs +// loader.js +import { isInternalThread } from 'node:worker_threads'; +console.log(isInternalThread); // true +``` + +```cjs +// main.js +const { isInternalThread } = require('node:worker_threads'); +console.log(isInternalThread); // false +``` + +```mjs +// main.js +import { isInternalThread } from 'node:worker_threads'; +console.log(isInternalThread); // false +``` + ## `worker.isMainThread` + +* `condition` {Function|AsyncFunction} An assertion function that is invoked + periodically until it completes successfully or the defined polling timeout + elapses. Successful completion is defined as not throwing or rejecting. This + function does not accept any arguments, and is allowed to return any value. +* `options` {Object} An optional configuration object for the polling operation. + The following properties are supported: + * `interval` {number} The number of milliseconds to wait after an unsuccessful + invocation of `condition` before trying again. **Default:** `50`. + * `timeout` {number} The poll timeout in milliseconds. If `condition` has not + succeeded by the time this elapses, an error occurs. **Default:** `1000`. +* Returns: {Promise} Fulfilled with the value returned by `condition`. + +This method polls a `condition` function until that function either returns +successfully or the operation times out. + ## Class: `SuiteContext` + +* +* +* +* +* + + + +#### Project contacts + +* @marco-ippolito From 67f39b597a4f8f71d8ae3a3f236e69c9e9a7b46c Mon Sep 17 00:00:00 2001 From: Michael Dawson Date: Thu, 16 Jan 2025 17:44:06 -0500 Subject: [PATCH 091/158] doc: tweak info on reposts in ambassador program Signed-off-by: Michael Dawson PR-URL: https://github.com/nodejs/node/pull/56589 Reviewed-By: Rafael Gonzaga Reviewed-By: James M Snell Reviewed-By: Marco Ippolito --- doc/contributing/advocacy-ambassador-program.md | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/doc/contributing/advocacy-ambassador-program.md b/doc/contributing/advocacy-ambassador-program.md index 058a4d7c45cd69..76f29b73586691 100644 --- a/doc/contributing/advocacy-ambassador-program.md +++ b/doc/contributing/advocacy-ambassador-program.md @@ -94,11 +94,14 @@ process. An ambassador can request promotion of content in the following ways: * Posting a link to the content in the "what's new" issue in nodejs/ambassadors so that it goes out on the news feed. -Foundation staff will repost the social media post -without any need for validation based on the request coming from -an ambassador. These requests can be made through the existing social channel -in the OpenJS Slack. For that reason and for communication purposes and -collaboration opportunities, ambassadors should be members of the +For accounts managed by foundation staff, the staff will repost the social +media post without any need for validation based on the request coming from +an ambassador. For accounts managed by the project with an approval process, +(for example bluesky) documentation for the approval process will indicate +that repost requests from ambassadors should generally be approved. These +requests can be made through the existing social channel in the OpenJS Slack. 
+For that reason and for communication purposes and collaboration opportunities, +ambassadors should be members of the [OpenJS Slack](https://slack-invite.openjsf.org/). ## Messages and topics to promote From f97cd5b02b8701529eab58f786d36922af119057 Mon Sep 17 00:00:00 2001 From: Pietro Marchini Date: Fri, 17 Jan 2025 11:34:55 +0100 Subject: [PATCH 092/158] test_runner: remove unused errors PR-URL: https://github.com/nodejs/node/pull/56607 Reviewed-By: Colin Ihrig Reviewed-By: Jacob Smith --- doc/api/errors.md | 38 +++++++++++++++++++------------------- lib/internal/errors.js | 15 --------------- 2 files changed, 19 insertions(+), 34 deletions(-) diff --git a/doc/api/errors.md b/doc/api/errors.md index d59a51329a8bfa..eb90fee8fc5ed5 100644 --- a/doc/api/errors.md +++ b/doc/api/errors.md @@ -2809,25 +2809,6 @@ An unspecified or non-specific system error has occurred within the Node.js process. The error object will have an `err.info` object property with additional details. - - -### `ERR_TAP_LEXER_ERROR` - -An error representing a failing lexer state. - - - -### `ERR_TAP_PARSER_ERROR` - -An error representing a failing parser state. Additional information about -the token causing the error is available via the `cause` property. - - - -### `ERR_TAP_VALIDATION_ERROR` - -This error represents a failed TAP validation. - ### `ERR_TEST_FAILURE` @@ -3863,6 +3844,25 @@ removed: v10.0.0 Used when an attempt is made to use a readable stream that has not implemented [`readable._read()`][]. + + +### `ERR_TAP_LEXER_ERROR` + +An error representing a failing lexer state. + + + +### `ERR_TAP_PARSER_ERROR` + +An error representing a failing parser state. Additional information about +the token causing the error is available via the `cause` property. + + + +### `ERR_TAP_VALIDATION_ERROR` + +This error represents a failed TAP validation. 
+ ### `ERR_TLS_RENEGOTIATION_FAILED` diff --git a/lib/internal/errors.js b/lib/internal/errors.js index d990f8d5a106aa..bda50797124758 100644 --- a/lib/internal/errors.js +++ b/lib/internal/errors.js @@ -1739,21 +1739,6 @@ E('ERR_STREAM_WRAP', 'Stream has StringDecoder set or is in objectMode', Error); E('ERR_STREAM_WRITE_AFTER_END', 'write after end', Error); E('ERR_SYNTHETIC', 'JavaScript Callstack', Error); E('ERR_SYSTEM_ERROR', 'A system error occurred', SystemError, HideStackFramesError); -E('ERR_TAP_LEXER_ERROR', function(errorMsg) { - hideInternalStackFrames(this); - return errorMsg; -}, Error); -E('ERR_TAP_PARSER_ERROR', function(errorMsg, details, tokenCausedError, source) { - hideInternalStackFrames(this); - this.cause = tokenCausedError; - const { column, line, start, end } = tokenCausedError.location; - const errorDetails = `${details} at line ${line}, column ${column} (start ${start}, end ${end})`; - return errorMsg + errorDetails; -}, SyntaxError); -E('ERR_TAP_VALIDATION_ERROR', function(errorMsg) { - hideInternalStackFrames(this); - return errorMsg; -}, Error); E('ERR_TEST_FAILURE', function(error, failureType) { hideInternalStackFrames(this); assert(typeof failureType === 'string' || typeof failureType === 'symbol', From 9c5c3b3115a613bd182e6c99c467a385496678d0 Mon Sep 17 00:00:00 2001 From: Marco Ippolito Date: Fri, 17 Jan 2025 13:42:50 +0100 Subject: [PATCH 093/158] module: add ERR_UNSUPPORTED_TYPESCRIPT_SYNTAX PR-URL: https://github.com/nodejs/node/pull/56610 Reviewed-By: James M Snell Reviewed-By: Ruben Bridgewater Reviewed-By: Geoffrey Booth Reviewed-By: Ethan Arrowood Reviewed-By: Chengzhong Wu --- doc/api/cli.md | 4 +- doc/api/errors.md | 20 ++++++++-- lib/internal/errors.js | 1 + lib/internal/modules/typescript.js | 17 +++++++- lib/internal/process/execution.js | 40 +++++++------------ test/es-module/test-typescript-eval.mjs | 36 +++++++++++++---- test/es-module/test-typescript.mjs | 10 +++++ .../typescript/ts/test-invalid-syntax.ts | 3 ++ 8 files changed, 93 insertions(+), 38 deletions(-) create mode 100644 test/fixtures/typescript/ts/test-invalid-syntax.ts diff --git a/doc/api/cli.md b/doc/api/cli.md index b2e20da8fc161b..d36c2c2be810e3 100644 --- a/doc/api/cli.md +++ b/doc/api/cli.md @@ -1403,7 +1403,8 @@ Node.js will try to detect the syntax with the following steps: 1. Run the input as CommonJS. 2. If step 1 fails, run the input as an ES module. 3. If step 2 fails with a SyntaxError, strip the types. -4. If step 3 fails with an error code [`ERR_INVALID_TYPESCRIPT_SYNTAX`][], +4. If step 3 fails with an error code [`ERR_UNSUPPORTED_TYPESCRIPT_SYNTAX`][] + or [`ERR_INVALID_TYPESCRIPT_SYNTAX`][], throw the error from step 2, including the TypeScript error in the message, else run as CommonJS. 5. If step 4 fails, run the input as an ES module. @@ -3691,6 +3692,7 @@ node --stack-trace-limit=12 -p -e "Error.stackTraceLimit" # prints 12 [`Buffer`]: buffer.md#class-buffer [`CRYPTO_secure_malloc_init`]: https://www.openssl.org/docs/man3.0/man3/CRYPTO_secure_malloc_init.html [`ERR_INVALID_TYPESCRIPT_SYNTAX`]: errors.md#err_invalid_typescript_syntax +[`ERR_UNSUPPORTED_TYPESCRIPT_SYNTAX`]: errors.md#err_unsupported_typescript_syntax [`NODE_OPTIONS`]: #node_optionsoptions [`NO_COLOR`]: https://no-color.org [`SlowBuffer`]: buffer.md#class-slowbuffer diff --git a/doc/api/errors.md b/doc/api/errors.md index eb90fee8fc5ed5..b14b668298e92c 100644 --- a/doc/api/errors.md +++ b/doc/api/errors.md @@ -2093,11 +2093,13 @@ does not consist of exactly two elements. 
-The provided TypeScript syntax is not valid or unsupported. -This could happen when using TypeScript syntax that requires -transformation with [type-stripping][]. +The provided TypeScript syntax is not valid. @@ -3096,6 +3098,18 @@ try { } ``` + + +### `ERR_UNSUPPORTED_TYPESCRIPT_SYNTAX` + + + +The provided TypeScript syntax is unsupported. +This could happen when using TypeScript syntax that requires +transformation with [type-stripping][]. + ### `ERR_USE_AFTER_CLOSE` diff --git a/lib/internal/errors.js b/lib/internal/errors.js index bda50797124758..d6b2ceb5962351 100644 --- a/lib/internal/errors.js +++ b/lib/internal/errors.js @@ -1838,6 +1838,7 @@ E('ERR_UNSUPPORTED_NODE_MODULES_TYPE_STRIPPING', E('ERR_UNSUPPORTED_RESOLVE_REQUEST', 'Failed to resolve module specifier "%s" from "%s": Invalid relative URL or base scheme is not hierarchical.', TypeError); +E('ERR_UNSUPPORTED_TYPESCRIPT_SYNTAX', '%s', SyntaxError); E('ERR_USE_AFTER_CLOSE', '%s was closed', Error); // This should probably be a `TypeError`. diff --git a/lib/internal/modules/typescript.js b/lib/internal/modules/typescript.js index 993fd3ff72d74d..689788b09853c4 100644 --- a/lib/internal/modules/typescript.js +++ b/lib/internal/modules/typescript.js @@ -12,8 +12,10 @@ const { assertTypeScript, isUnderNodeModules, kEmptyObject } = require('internal/util'); const { + ERR_INTERNAL_ASSERTION, ERR_INVALID_TYPESCRIPT_SYNTAX, ERR_UNSUPPORTED_NODE_MODULES_TYPE_STRIPPING, + ERR_UNSUPPORTED_TYPESCRIPT_SYNTAX, } = require('internal/errors').codes; const { getOptionValue } = require('internal/options'); const assert = require('internal/assert'); @@ -49,7 +51,20 @@ function parseTypeScript(source, options) { try { return parse(source, options); } catch (error) { - throw new ERR_INVALID_TYPESCRIPT_SYNTAX(error.message); + /** + * Amaro v0.3.0 (from SWC v1.10.7) throws an object with `message` and `code` properties. + * It allows us to distinguish between invalid syntax and unsupported syntax. + */ + switch (error.code) { + case 'UnsupportedSyntax': + throw new ERR_UNSUPPORTED_TYPESCRIPT_SYNTAX(error.message); + case 'InvalidSyntax': + throw new ERR_INVALID_TYPESCRIPT_SYNTAX(error.message); + default: + // SWC will throw strings when something goes wrong. + // Check if has the `message` property or treat it as a string. + throw new ERR_INTERNAL_ASSERTION(error.message ?? error); + } } } diff --git a/lib/internal/process/execution.js b/lib/internal/process/execution.js index f5b19d5a7e8c9c..d4d7a604851ef1 100644 --- a/lib/internal/process/execution.js +++ b/lib/internal/process/execution.js @@ -35,7 +35,7 @@ const { getOptionValue } = require('internal/options'); const { makeContextifyScript, runScriptInThisContext, } = require('internal/vm'); -const { emitExperimentalWarning, isError } = require('internal/util'); +const { emitExperimentalWarning } = require('internal/util'); // shouldAbortOnUncaughtToggle is a typed array for faster // communication with JS. const { shouldAbortOnUncaughtToggle } = internalBinding('util'); @@ -254,10 +254,6 @@ function evalTypeScript(name, source, breakFirstLine, print, shouldLoadESM = fal try { compiledScript = compileScript(name, source, baseUrl); } catch (originalError) { - // If it's not a SyntaxError, rethrow it. - if (!isError(originalError) || originalError.name !== 'SyntaxError') { - throw originalError; - } try { sourceToRun = stripTypeScriptModuleTypes(source, name, false); // Retry the CJS/ESM syntax detection after stripping the types. 
@@ -270,15 +266,14 @@ function evalTypeScript(name, source, breakFirstLine, print, shouldLoadESM = fal // Emit the experimental warning after the code was successfully evaluated. emitExperimentalWarning('Type Stripping'); } catch (tsError) { - // If its not an error, or it's not an invalid typescript syntax error, rethrow it. - if (!isError(tsError) || tsError?.code !== 'ERR_INVALID_TYPESCRIPT_SYNTAX') { - throw tsError; + // If it's invalid or unsupported TypeScript syntax, rethrow the original error + // with the TypeScript error message added to the stack. + if (tsError.code === 'ERR_INVALID_TYPESCRIPT_SYNTAX' || tsError.code === 'ERR_UNSUPPORTED_TYPESCRIPT_SYNTAX') { + originalError.stack = decorateCJSErrorWithTSMessage(originalError.stack, tsError.message); + throw originalError; } - try { - originalError.stack = decorateCJSErrorWithTSMessage(originalError.stack, tsError.message); - } catch { /* Ignore potential errors coming from `stack` getter/setter */ } - throw originalError; + throw tsError; } } @@ -322,28 +317,23 @@ function evalTypeScriptModuleEntryPoint(source, print) { // Compile the module to check for syntax errors. moduleWrap = loader.createModuleWrap(source, url); } catch (originalError) { - // If it's not a SyntaxError, rethrow it. - if (!isError(originalError) || originalError.name !== 'SyntaxError') { - throw originalError; - } - let strippedSource; try { - strippedSource = stripTypeScriptModuleTypes(source, url, false); + const strippedSource = stripTypeScriptModuleTypes(source, url, false); // If the moduleWrap was successfully created, execute the module job. // outside the try-catch block to avoid catching runtime errors. moduleWrap = loader.createModuleWrap(strippedSource, url); // Emit the experimental warning after the code was successfully compiled. emitExperimentalWarning('Type Stripping'); } catch (tsError) { - // If its not an error, or it's not an invalid typescript syntax error, rethrow it. - if (!isError(tsError) || tsError?.code !== 'ERR_INVALID_TYPESCRIPT_SYNTAX') { - throw tsError; - } - try { + // If it's invalid or unsupported TypeScript syntax, rethrow the original error + // with the TypeScript error message added to the stack. 
+ if (tsError.code === 'ERR_INVALID_TYPESCRIPT_SYNTAX' || + tsError.code === 'ERR_UNSUPPORTED_TYPESCRIPT_SYNTAX') { originalError.stack = `${tsError.message}\n\n${originalError.stack}`; - } catch { /* Ignore potential errors coming from `stack` getter/setter */ } + throw originalError; + } - throw originalError; + throw tsError; } } // If the moduleWrap was successfully created either with by just compiling diff --git a/test/es-module/test-typescript-eval.mjs b/test/es-module/test-typescript-eval.mjs index 5c6f25bec4df7d..bbbed8863de25a 100644 --- a/test/es-module/test-typescript-eval.mjs +++ b/test/es-module/test-typescript-eval.mjs @@ -102,33 +102,33 @@ test('expect fail eval TypeScript ESM syntax with input-type commonjs-typescript strictEqual(result.code, 1); }); -test('check syntax error is thrown when passing invalid syntax', async () => { +test('check syntax error is thrown when passing unsupported syntax', async () => { const result = await spawnPromisified(process.execPath, [ '--eval', 'enum Foo { A, B, C }']); strictEqual(result.stdout, ''); match(result.stderr, /SyntaxError/); - doesNotMatch(result.stderr, /ERR_INVALID_TYPESCRIPT_SYNTAX/); + doesNotMatch(result.stderr, /ERR_UNSUPPORTED_TYPESCRIPT_SYNTAX/); strictEqual(result.code, 1); }); -test('check syntax error is thrown when passing invalid syntax with --input-type=module-typescript', async () => { +test('check syntax error is thrown when passing unsupported syntax with --input-type=module-typescript', async () => { const result = await spawnPromisified(process.execPath, [ '--input-type=module-typescript', '--eval', 'enum Foo { A, B, C }']); strictEqual(result.stdout, ''); - match(result.stderr, /ERR_INVALID_TYPESCRIPT_SYNTAX/); + match(result.stderr, /ERR_UNSUPPORTED_TYPESCRIPT_SYNTAX/); strictEqual(result.code, 1); }); -test('check syntax error is thrown when passing invalid syntax with --input-type=commonjs-typescript', async () => { +test('check syntax error is thrown when passing unsupported syntax with --input-type=commonjs-typescript', async () => { const result = await spawnPromisified(process.execPath, [ '--input-type=commonjs-typescript', '--eval', 'enum Foo { A, B, C }']); strictEqual(result.stdout, ''); - match(result.stderr, /ERR_INVALID_TYPESCRIPT_SYNTAX/); + match(result.stderr, /ERR_UNSUPPORTED_TYPESCRIPT_SYNTAX/); strictEqual(result.code, 1); }); @@ -140,7 +140,7 @@ test('should not parse TypeScript with --type-module=commonjs', async () => { strictEqual(result.stdout, ''); match(result.stderr, /SyntaxError/); - doesNotMatch(result.stderr, /ERR_INVALID_TYPESCRIPT_SYNTAX/); + doesNotMatch(result.stderr, /ERR_UNSUPPORTED_TYPESCRIPT_SYNTAX/); strictEqual(result.code, 1); }); @@ -152,7 +152,7 @@ test('should not parse TypeScript with --type-module=module', async () => { strictEqual(result.stdout, ''); match(result.stderr, /SyntaxError/); - doesNotMatch(result.stderr, /ERR_INVALID_TYPESCRIPT_SYNTAX/); + doesNotMatch(result.stderr, /ERR_UNSUPPORTED_TYPESCRIPT_SYNTAX/); strictEqual(result.code, 1); }); @@ -222,3 +222,23 @@ test('typescript CJS code is throwing a syntax error at runtime', async () => { strictEqual(result.stdout, ''); strictEqual(result.code, 1); }); + +test('check syntax error is thrown when passing invalid syntax with --input-type=commonjs-typescript', async () => { + const result = await spawnPromisified(process.execPath, [ + '--input-type=commonjs-typescript', + '--eval', + 'function foo(){ await Promise.resolve(1); }']); + strictEqual(result.stdout, ''); + match(result.stderr, 
/ERR_INVALID_TYPESCRIPT_SYNTAX/); + strictEqual(result.code, 1); +}); + +test('check syntax error is thrown when passing invalid syntax with --input-type=module-typescript', async () => { + const result = await spawnPromisified(process.execPath, [ + '--input-type=module-typescript', + '--eval', + 'function foo(){ await Promise.resolve(1); }']); + strictEqual(result.stdout, ''); + match(result.stderr, /ERR_INVALID_TYPESCRIPT_SYNTAX/); + strictEqual(result.code, 1); +}); diff --git a/test/es-module/test-typescript.mjs b/test/es-module/test-typescript.mjs index 81aed880bdcf51..74c4a0f120b758 100644 --- a/test/es-module/test-typescript.mjs +++ b/test/es-module/test-typescript.mjs @@ -321,3 +321,13 @@ test('execute a TypeScript loader and a .js file', async () => { match(result.stdout, /Hello, TypeScript!/); strictEqual(result.code, 0); }); + +test('execute invalid TypeScript syntax', async () => { + const result = await spawnPromisified(process.execPath, [ + fixtures.path('typescript/ts/test-invalid-syntax.ts'), + ]); + + match(result.stderr, /ERR_INVALID_TYPESCRIPT_SYNTAX/); + strictEqual(result.stdout, ''); + strictEqual(result.code, 1); +}); diff --git a/test/fixtures/typescript/ts/test-invalid-syntax.ts b/test/fixtures/typescript/ts/test-invalid-syntax.ts new file mode 100644 index 00000000000000..031bce938d27dc --- /dev/null +++ b/test/fixtures/typescript/ts/test-invalid-syntax.ts @@ -0,0 +1,3 @@ +function foo(): string { + await Promise.resolve(1); +} From 676276889e0cff7695d70746f22305ae0d8dfb2f Mon Sep 17 00:00:00 2001 From: Joyee Cheung Date: Tue, 14 Jan 2025 13:35:54 +0100 Subject: [PATCH 094/158] test: add maxCount and gcOptions to gcUntil() PR-URL: https://github.com/nodejs/node/pull/56522 Reviewed-By: James M Snell Reviewed-By: Chengzhong Wu --- test/common/gc.js | 38 +++++++++++++++++--------------------- 1 file changed, 17 insertions(+), 21 deletions(-) diff --git a/test/common/gc.js b/test/common/gc.js index 82cc4c79edc3dd..87625068c2cbca 100644 --- a/test/common/gc.js +++ b/test/common/gc.js @@ -3,6 +3,8 @@ const wait = require('timers/promises').setTimeout; const assert = require('assert'); const common = require('../common'); +// TODO(joyeecheung): rewrite checkIfCollectable to use this too. +const { setImmediate: setImmediatePromisified } = require('timers/promises'); const gcTrackerMap = new WeakMap(); const gcTrackerTag = 'NODE_TEST_COMMON_GC_TRACKER'; @@ -40,32 +42,26 @@ function onGC(obj, gcListener) { /** * Repeatedly triggers garbage collection until a specified condition is met or a maximum number of attempts is reached. + * This utillity must be run in a Node.js instance that enables --expose-gc. * @param {string|Function} [name] - Optional name, used in the rejection message if the condition is not met. * @param {Function} condition - A function that returns true when the desired condition is met. + * @param {number} maxCount - Maximum number of garbage collections that should be tried. + * @param {object} gcOptions - Options to pass into the global gc() function. * @returns {Promise} A promise that resolves when the condition is met, or rejects after 10 failed attempts. */ -function gcUntil(name, condition) { - if (typeof name === 'function') { - condition = name; - name = undefined; - } - return new Promise((resolve, reject) => { - let count = 0; - function gcAndCheck() { - setImmediate(() => { - count++; - global.gc(); - if (condition()) { - resolve(); - } else if (count < 10) { - gcAndCheck(); - } else { - reject(name === undefined ? 
undefined : 'Test ' + name + ' failed'); - } - }); +async function gcUntil(name, condition, maxCount = 10, gcOptions) { + for (let count = 0; count < maxCount; ++count) { + await setImmediatePromisified(); + if (gcOptions) { + await global.gc(gcOptions); + } else { + await global.gc(); // Passing in undefined is not the same as empty. } - gcAndCheck(); - }); + if (condition()) { + return; + } + } + throw new Error(`Test ${name} failed`); } // This function can be used to check if an object factor leaks or not, From 90f70ed8dd12252327d90217e39bfece2f8e9b48 Mon Sep 17 00:00:00 2001 From: Joyee Cheung Date: Thu, 9 Jan 2025 01:21:23 +0100 Subject: [PATCH 095/158] src: use cppgc to manage ContextifyContext This simplifies the memory management of ContextifyContext, making all references visible to V8. The destructors don't need to do anything because when the wrapper is going away, the context is already going away or otherwise it would've been holding the wrapper alive, so there's no need to reset the pointers in the context. Also, any global handles to the context would've been empty at this point, and the per-Environment context tracking code is capable of dealing with empty handles from contexts purged elsewhere. To this end, the context tracking code also purges empty handles from the list now, to prevent keeping too many empty handles around. PR-URL: https://github.com/nodejs/node/pull/56522 Reviewed-By: James M Snell Reviewed-By: Chengzhong Wu --- src/env.cc | 7 +- src/env.h | 1 + src/node_contextify.cc | 69 ++++++++++--------- src/node_contextify.h | 84 ++++++++++++++++++++---- test/parallel/test-inspector-contexts.js | 14 ++-- 5 files changed, 115 insertions(+), 60 deletions(-) diff --git a/src/env.cc b/src/env.cc index f0f97244fdef63..0eda889802710d 100644 --- a/src/env.cc +++ b/src/env.cc @@ -223,7 +223,12 @@ void AsyncHooks::InstallPromiseHooks(Local ctx) { : PersistentToLocal::Strong(js_promise_hooks_[3])); } +void Environment::PurgeTrackedEmptyContexts() { + std::erase_if(contexts_, [&](auto&& el) { return el.IsEmpty(); }); +} + void Environment::TrackContext(Local context) { + PurgeTrackedEmptyContexts(); size_t id = contexts_.size(); contexts_.resize(id + 1); contexts_[id].Reset(isolate_, context); @@ -232,7 +237,7 @@ void Environment::TrackContext(Local context) { void Environment::UntrackContext(Local context) { HandleScope handle_scope(isolate_); - std::erase_if(contexts_, [&](auto&& el) { return el.IsEmpty(); }); + PurgeTrackedEmptyContexts(); for (auto it = contexts_.begin(); it != contexts_.end(); it++) { if (Local saved_context = PersistentToLocal::Weak(isolate_, *it); saved_context == context) { diff --git a/src/env.h b/src/env.h index 4082458cf6aad2..5cd34759585eb2 100644 --- a/src/env.h +++ b/src/env.h @@ -1093,6 +1093,7 @@ class Environment final : public MemoryRetainer { const char* errmsg); void TrackContext(v8::Local context); void UntrackContext(v8::Local context); + void PurgeTrackedEmptyContexts(); std::list loaded_addons_; v8::Isolate* const isolate_; diff --git a/src/node_contextify.cc b/src/node_contextify.cc index 77d35675827c67..ab6659d8cdccc6 100644 --- a/src/node_contextify.cc +++ b/src/node_contextify.cc @@ -118,8 +118,9 @@ Local Uint32ToName(Local context, uint32_t index) { } // anonymous namespace -BaseObjectPtr ContextifyContext::New( - Environment* env, Local sandbox_obj, ContextOptions* options) { +ContextifyContext* ContextifyContext::New(Environment* env, + Local sandbox_obj, + ContextOptions* options) { Local object_template; 
HandleScope scope(env->isolate()); CHECK_IMPLIES(sandbox_obj.IsEmpty(), options->vanilla); @@ -140,21 +141,25 @@ BaseObjectPtr ContextifyContext::New( if (!(CreateV8Context(env->isolate(), object_template, snapshot_data, queue) .ToLocal(&v8_context))) { // Allocation failure, maximum call stack size reached, termination, etc. - return BaseObjectPtr(); + return {}; } return New(v8_context, env, sandbox_obj, options); } -void ContextifyContext::MemoryInfo(MemoryTracker* tracker) const {} +void ContextifyContext::Trace(cppgc::Visitor* visitor) const { + CppgcMixin::Trace(visitor); + visitor->Trace(context_); +} ContextifyContext::ContextifyContext(Environment* env, Local wrapper, Local v8_context, ContextOptions* options) - : BaseObject(env, wrapper), - microtask_queue_(options->own_microtask_queue + : microtask_queue_(options->own_microtask_queue ? options->own_microtask_queue.release() : nullptr) { + CppgcMixin::Wrap(this, env, wrapper); + context_.Reset(env->isolate(), v8_context); // This should only be done after the initial initializations of the context // global object is finished. @@ -162,19 +167,6 @@ ContextifyContext::ContextifyContext(Environment* env, ContextEmbedderIndex::kContextifyContext)); v8_context->SetAlignedPointerInEmbedderData( ContextEmbedderIndex::kContextifyContext, this); - // It's okay to make this reference weak - V8 would create an internal - // reference to this context via the constructor of the wrapper. - // As long as the wrapper is alive, it's constructor is alive, and so - // is the context. - context_.SetWeak(); -} - -ContextifyContext::~ContextifyContext() { - Isolate* isolate = env()->isolate(); - HandleScope scope(isolate); - - env()->UnassignFromContext(PersistentToLocal::Weak(isolate, context_)); - context_.Reset(); } void ContextifyContext::InitializeGlobalTemplates(IsolateData* isolate_data) { @@ -251,11 +243,10 @@ MaybeLocal ContextifyContext::CreateV8Context( return scope.Escape(ctx); } -BaseObjectPtr ContextifyContext::New( - Local v8_context, - Environment* env, - Local sandbox_obj, - ContextOptions* options) { +ContextifyContext* ContextifyContext::New(Local v8_context, + Environment* env, + Local sandbox_obj, + ContextOptions* options) { HandleScope scope(env->isolate()); CHECK_IMPLIES(sandbox_obj.IsEmpty(), options->vanilla); // This only initializes part of the context. The primordials are @@ -263,7 +254,7 @@ BaseObjectPtr ContextifyContext::New( // things down significantly and they are only needed in rare occasions // in the vm contexts. 
if (InitializeContextRuntime(v8_context).IsNothing()) { - return BaseObjectPtr(); + return {}; } Local main_context = env->context(); @@ -300,7 +291,7 @@ BaseObjectPtr ContextifyContext::New( info.origin = *origin_val; } - BaseObjectPtr result; + ContextifyContext* result; Local wrapper; { Context::Scope context_scope(v8_context); @@ -315,7 +306,7 @@ BaseObjectPtr ContextifyContext::New( ctor_name, static_cast(v8::DontEnum)) .IsNothing()) { - return BaseObjectPtr(); + return {}; } } @@ -328,7 +319,7 @@ BaseObjectPtr ContextifyContext::New( env->host_defined_option_symbol(), options->host_defined_options_id) .IsNothing()) { - return BaseObjectPtr(); + return {}; } env->AssignToContext(v8_context, nullptr, info); @@ -336,13 +327,15 @@ BaseObjectPtr ContextifyContext::New( if (!env->contextify_wrapper_template() ->NewInstance(v8_context) .ToLocal(&wrapper)) { - return BaseObjectPtr(); + return {}; } - result = - MakeBaseObject(env, wrapper, v8_context, options); - // The only strong reference to the wrapper will come from the sandbox. - result->MakeWeak(); + result = cppgc::MakeGarbageCollected( + env->isolate()->GetCppHeap()->GetAllocationHandle(), + env, + wrapper, + v8_context, + options); } Local wrapper_holder = @@ -352,7 +345,7 @@ BaseObjectPtr ContextifyContext::New( ->SetPrivate( v8_context, env->contextify_context_private_symbol(), wrapper) .IsNothing()) { - return BaseObjectPtr(); + return {}; } // Assign host_defined_options_id to the sandbox object or the global object @@ -364,7 +357,7 @@ BaseObjectPtr ContextifyContext::New( env->host_defined_option_symbol(), options->host_defined_options_id) .IsNothing()) { - return BaseObjectPtr(); + return {}; } return result; } @@ -438,7 +431,7 @@ void ContextifyContext::MakeContext(const FunctionCallbackInfo& args) { options.host_defined_options_id = args[6].As(); TryCatchScope try_catch(env); - BaseObjectPtr context_ptr = + ContextifyContext* context_ptr = ContextifyContext::New(env, sandbox, &options); if (try_catch.HasCaught()) { @@ -469,6 +462,10 @@ ContextifyContext* ContextifyContext::ContextFromContextifiedSandbox( template ContextifyContext* ContextifyContext::Get(const PropertyCallbackInfo& args) { + // TODO(joyeecheung): it should be fine to simply use + // args.GetIsolate()->GetCurrentContext() and take the pointer at + // ContextEmbedderIndex::kContextifyContext, as V8 is supposed to + // push the creation context before invoking these callbacks. 
return Get(args.This()); } diff --git a/src/node_contextify.h b/src/node_contextify.h index d67968406d7b74..de69c22b0ebaed 100644 --- a/src/node_contextify.h +++ b/src/node_contextify.h @@ -23,17 +23,73 @@ struct ContextOptions { bool vanilla = false; }; -class ContextifyContext : public BaseObject { +/** + * The memory management of a vm context is as follows: + * + * user code + * │ + * As global proxy or ▼ + * ┌──────────────┐ kSandboxObject embedder data ┌────────────────┐ + * ┌─► │ V8 Context │────────────────────────────────►│ Wrapper holder │ + * │ └──────────────┘ └───────┬────────┘ + * │ ▲ Object constructor/creation context │ + * │ │ │ + * │ ┌──────┴────────────┐ contextify_context_private_symbol │ + * │ │ ContextifyContext │◄────────────────────────────────────┘ + * │ │ JS Wrapper │◄──────────► ┌─────────────────────────┐ + * │ └───────────────────┘ cppgc │ node::ContextifyContext │ + * │ │ C++ Object │ + * └──────────────────────────────────► └─────────────────────────┘ + * v8::TracedReference / ContextEmbedderIndex::kContextifyContext + * + * There are two possibilities for the "wrapper holder": + * + * 1. When vm.constants.DONT_CONTEXTIFY is used, the wrapper holder is the V8 + * context's global proxy object + * 2. Otherwise it's the arbitrary "sandbox object" that users pass into + * vm.createContext() or a new empty object created internally if they pass + * undefined. + * + * In 2, the global object of the new V8 context is created using + * global_object_template with interceptors that perform any requested + * operations on the global object in the context first on the sandbox object + * living outside of the new context, then fall back to the global proxy of the + * new context. + * + * It's critical for the user-accessible wrapper holder to keep the + * ContextifyContext wrapper alive via contextify_context_private_symbol + * so that the V8 context is always available to the user while they still + * hold the vm "context" object alive. + * + * It's also critical for the V8 context to keep the wrapper holder + * (specifically, the "sandbox object" if users pass one) as well as the + * node::ContextifyContext C++ object alive, so that when the code + * runs inside the object and accesses the global object, the interceptors + * can still access the "sandbox object" and perform operations + * on them, even if users already relinquish access to the outer + * "sandbox object". + * + * The v8::TracedReference and the ContextEmbedderIndex::kContextifyContext + * slot in the context only act as shortcuts between + * the node::ContextifyContext C++ object and the V8 context. + */ +class ContextifyContext final : CPPGC_MIXIN(ContextifyContext) { public: + SET_CPPGC_NAME(ContextifyContext) + void Trace(cppgc::Visitor* visitor) const final; + ContextifyContext(Environment* env, v8::Local wrapper, v8::Local v8_context, ContextOptions* options); - ~ContextifyContext(); - void MemoryInfo(MemoryTracker* tracker) const override; - SET_MEMORY_INFO_NAME(ContextifyContext) - SET_SELF_SIZE(ContextifyContext) + // The destructors don't need to do anything because when the wrapper is + // going away, the context is already going away or otherwise it would've + // been holding the wrapper alive, so there's no need to reset the pointers + // in the context. Also, any global handles to the context would've been + // empty at this point, and the per-Environment context tracking code is + // capable of dealing with empty handles from contexts purged elsewhere. 
+ ~ContextifyContext() = default; static v8::MaybeLocal CreateV8Context( v8::Isolate* isolate, @@ -48,7 +104,7 @@ class ContextifyContext : public BaseObject { Environment* env, const v8::Local& wrapper_holder); inline v8::Local context() const { - return PersistentToLocal::Default(env()->isolate(), context_); + return context_.Get(env()->isolate()); } inline v8::Local global_proxy() const { @@ -75,14 +131,14 @@ class ContextifyContext : public BaseObject { static void InitializeGlobalTemplates(IsolateData* isolate_data); private: - static BaseObjectPtr New(Environment* env, - v8::Local sandbox_obj, - ContextOptions* options); + static ContextifyContext* New(Environment* env, + v8::Local sandbox_obj, + ContextOptions* options); // Initialize a context created from CreateV8Context() - static BaseObjectPtr New(v8::Local ctx, - Environment* env, - v8::Local sandbox_obj, - ContextOptions* options); + static ContextifyContext* New(v8::Local ctx, + Environment* env, + v8::Local sandbox_obj, + ContextOptions* options); static bool IsStillInitializing(const ContextifyContext* ctx); static void MakeContext(const v8::FunctionCallbackInfo& args); @@ -140,7 +196,7 @@ class ContextifyContext : public BaseObject { static void IndexedPropertyEnumeratorCallback( const v8::PropertyCallbackInfo& args); - v8::Global context_; + v8::TracedReference context_; std::unique_ptr microtask_queue_; }; diff --git a/test/parallel/test-inspector-contexts.js b/test/parallel/test-inspector-contexts.js index 9cdf2d0017c4be..3d6ee4d460e863 100644 --- a/test/parallel/test-inspector-contexts.js +++ b/test/parallel/test-inspector-contexts.js @@ -8,7 +8,7 @@ common.skipIfInspectorDisabled(); const assert = require('assert'); const vm = require('vm'); const { Session } = require('inspector'); - +const { gcUntil } = require('../common/gc'); const session = new Session(); session.connect(); @@ -66,8 +66,7 @@ async function testContextCreatedAndDestroyed() { // GC is unpredictable... console.log('Checking/waiting for GC.'); - while (!contextDestroyed) - global.gc(); + await gcUntil('context destruction', () => contextDestroyed, Infinity, { type: 'major', execution: 'async' }); console.log('Context destroyed.'); assert.strictEqual(contextDestroyed.params.executionContextId, id, @@ -98,8 +97,7 @@ async function testContextCreatedAndDestroyed() { // GC is unpredictable... console.log('Checking/waiting for GC again.'); - while (!contextDestroyed) - global.gc(); + await gcUntil('context destruction', () => contextDestroyed, Infinity, { type: 'major', execution: 'async' }); console.log('Other context destroyed.'); } @@ -124,8 +122,7 @@ async function testContextCreatedAndDestroyed() { // GC is unpredictable... console.log('Checking/waiting for GC a third time.'); - while (!contextDestroyed) - global.gc(); + await gcUntil('context destruction', () => contextDestroyed, Infinity, { type: 'major', execution: 'async' }); console.log('Context destroyed once again.'); } @@ -148,8 +145,7 @@ async function testContextCreatedAndDestroyed() { // GC is unpredictable... 
console.log('Checking/waiting for GC a fourth time.'); - while (!contextDestroyed) - global.gc(); + await gcUntil('context destruction', () => contextDestroyed, Infinity, { type: 'major', execution: 'async' }); console.log('Context destroyed a fourth time.'); } } From bcc1c650662544f81c6775742438fd1fe8af0459 Mon Sep 17 00:00:00 2001 From: Antoine du Hamel Date: Fri, 17 Jan 2025 17:43:26 +0100 Subject: [PATCH 096/158] tools: fix permissions in `lint-release-proposal` workflow MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PR-URL: https://github.com/nodejs/node/pull/56614 Reviewed-By: Michaël Zasso Reviewed-By: Ruy Adorno Reviewed-By: Luigi Pinca --- .github/workflows/lint-release-proposal.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/lint-release-proposal.yml b/.github/workflows/lint-release-proposal.yml index ecda2b616c0d02..9d8ba5998a7a5c 100644 --- a/.github/workflows/lint-release-proposal.yml +++ b/.github/workflows/lint-release-proposal.yml @@ -19,6 +19,8 @@ permissions: jobs: lint-release-commit: runs-on: ubuntu-latest + permissions: + pull-requests: read steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: From d83d89a08efe2fe8bf1a423acdcc7a886a354179 Mon Sep 17 00:00:00 2001 From: Michael Dawson Date: Fri, 17 Jan 2025 12:58:47 -0500 Subject: [PATCH 097/158] crypto: add missing return value check Add return value check for call to SSL_CTX_add_client_CA to be consistent with other places it is called Fixed unused warning in one of the static analysis tools we use at Red Hat even though it is not being reported by coverity in the configuration we run. Signed-off-by: Michael Dawson PR-URL: https://github.com/nodejs/node/pull/56615 Reviewed-By: Luigi Pinca Reviewed-By: James M Snell --- src/crypto/crypto_context.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crypto/crypto_context.cc b/src/crypto/crypto_context.cc index 8f1e6dc7110b11..c7574e67f03f03 100644 --- a/src/crypto/crypto_context.cc +++ b/src/crypto/crypto_context.cc @@ -1164,7 +1164,7 @@ void SecureContext::LoadPKCS12(const FunctionCallbackInfo& args) { X509* ca = sk_X509_value(extra_certs.get(), i); X509_STORE_add_cert(sc->GetCertStoreOwnedByThisSecureContext(), ca); - SSL_CTX_add_client_CA(sc->ctx_.get(), ca); + CHECK_EQ(1, SSL_CTX_add_client_CA(sc->ctx_.get(), ca)); } ret = true; From 9ffe3ad4b1e5a98b911ddafe431fc52819090771 Mon Sep 17 00:00:00 2001 From: "Node.js GitHub Bot" Date: Fri, 17 Jan 2025 13:54:52 -0500 Subject: [PATCH 098/158] deps: update libuv to 1.50.0 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PR-URL: https://github.com/nodejs/node/pull/56616 Reviewed-By: Rafael Gonzaga Reviewed-By: Colin Ihrig Reviewed-By: Juan José Arboleda Reviewed-By: Santiago Gimeno Reviewed-By: Luigi Pinca Reviewed-By: Ulises Gascón Reviewed-By: Richard Lau --- deps/uv/.mailmap | 1 + deps/uv/AUTHORS | 4 +- deps/uv/CMakeLists.txt | 5 +- deps/uv/ChangeLog | 83 ++++- deps/uv/LINKS.md | 1 + deps/uv/MAINTAINERS.md | 6 +- deps/uv/Makefile.am | 3 +- deps/uv/SUPPORTED_PLATFORMS.md | 4 +- deps/uv/configure.ac | 2 +- deps/uv/docs/src/fs_event.rst | 5 + deps/uv/docs/src/misc.rst | 11 + deps/uv/docs/src/threading.rst | 25 ++ deps/uv/docs/src/threadpool.rst | 2 + deps/uv/docs/src/timer.rst | 14 +- deps/uv/docs/src/udp.rst | 14 + deps/uv/include/uv.h | 12 + deps/uv/include/uv/errno.h | 6 + deps/uv/include/uv/unix.h | 5 +- 
deps/uv/include/uv/version.h | 4 +- deps/uv/include/uv/win.h | 10 +- deps/uv/src/fs-poll.c | 3 + deps/uv/src/idna.c | 2 +- deps/uv/src/threadpool.c | 1 + deps/uv/src/unix/async.c | 83 +++++ deps/uv/src/unix/core.c | 50 ++- deps/uv/src/unix/darwin-proctitle.c | 20 +- deps/uv/src/unix/internal.h | 24 ++ deps/uv/src/unix/kqueue.c | 29 +- deps/uv/src/unix/linux.c | 75 +++-- deps/uv/src/unix/pipe.c | 30 +- deps/uv/src/unix/thread.c | 98 ++++++ deps/uv/src/unix/udp.c | 387 ++++++++++++------------ deps/uv/src/uv-common.c | 22 ++ deps/uv/src/uv-common.h | 20 ++ deps/uv/src/win/core.c | 110 +------ deps/uv/src/win/fs-event.c | 4 + deps/uv/src/win/fs.c | 205 ++++++++++++- deps/uv/src/win/pipe.c | 12 +- deps/uv/src/win/thread.c | 74 +++++ deps/uv/src/win/udp.c | 21 +- deps/uv/src/win/util.c | 92 ++++-- deps/uv/src/win/winapi.c | 13 - deps/uv/src/win/winapi.h | 108 +++---- deps/uv/src/win/winsock.h | 41 --- deps/uv/test/runner.c | 14 + deps/uv/test/test-fs-event.c | 9 +- deps/uv/test/test-fs.c | 54 ++++ deps/uv/test/test-idna.c | 26 +- deps/uv/test/test-list.h | 10 + deps/uv/test/test-pipe-getsockname.c | 9 + deps/uv/test/test-platform-output.c | 16 + deps/uv/test/test-spawn.c | 6 +- deps/uv/test/test-thread-name.c | 189 ++++++++++++ deps/uv/test/test-thread.c | 10 + deps/uv/test/test-udp-mmsg.c | 5 +- deps/uv/test/test-udp-multicast-join.c | 19 +- deps/uv/test/test-udp-multicast-join6.c | 1 + deps/uv/test/test-udp-try-send.c | 40 ++- 58 files changed, 1582 insertions(+), 567 deletions(-) create mode 100644 deps/uv/test/test-thread-name.c diff --git a/deps/uv/.mailmap b/deps/uv/.mailmap index 97f5d1f2c004c9..f5d5375e044e18 100644 --- a/deps/uv/.mailmap +++ b/deps/uv/.mailmap @@ -52,6 +52,7 @@ San-Tai Hsu Santiago Gimeno Saúl Ibarra Corretgé Saúl Ibarra Corretgé +Saúl Ibarra Corretgé Shigeki Ohtsu Shuowang (Wayne) Zhang TK-one diff --git a/deps/uv/AUTHORS b/deps/uv/AUTHORS index 041b7aff610f57..39550bbc535eb2 100644 --- a/deps/uv/AUTHORS +++ b/deps/uv/AUTHORS @@ -588,5 +588,7 @@ Raihaan Shouhell Rialbat Adam Poul T Lomholt -dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Thad House +Julian A Avar C <28635807+julian-a-avar-c@users.noreply.github.com> +amcgoogan <105525867+amcgoogan@users.noreply.github.com> +Rafael Gonzaga diff --git a/deps/uv/CMakeLists.txt b/deps/uv/CMakeLists.txt index 28c6df25666967..af89db2dfc2762 100644 --- a/deps/uv/CMakeLists.txt +++ b/deps/uv/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.9) +cmake_minimum_required(VERSION 3.10) if(POLICY CMP0091) cmake_policy(SET CMP0091 NEW) # Enable MSVC_RUNTIME_LIBRARY setting @@ -186,7 +186,7 @@ set(uv_sources src/version.c) if(WIN32) - list(APPEND uv_defines WIN32_LEAN_AND_MEAN _WIN32_WINNT=0x0602 _CRT_DECLARE_NONSTDC_NAMES=0) + list(APPEND uv_defines WIN32_LEAN_AND_MEAN _WIN32_WINNT=0x0A00 _CRT_DECLARE_NONSTDC_NAMES=0) list(APPEND uv_libraries psapi user32 @@ -667,6 +667,7 @@ if(LIBUV_BUILD_TESTS) test/test-thread-affinity.c test/test-thread-equal.c test/test-thread.c + test/test-thread-name.c test/test-thread-priority.c test/test-threadpool-cancel.c test/test-threadpool.c diff --git a/deps/uv/ChangeLog b/deps/uv/ChangeLog index dc2dd2790c57d3..006a9e1b415de9 100644 --- a/deps/uv/ChangeLog +++ b/deps/uv/ChangeLog @@ -1,4 +1,85 @@ -2024.10.18, Version 1.49.2 (Stable) +2025.01.15, Version 1.50.0 (Stable) + +Changes since version 1.49.2: + +* ci: run macOS and iOS tests also on macOS 14 (Saúl Ibarra Corretgé) + +* unix,win: map ENOEXEC errno (Saúl Ibarra 
Corretgé) + +* test: skip multicast join test on ENOEXEC (Saúl Ibarra Corretgé) + +* ci: make sure the macOS firewall is disabled (Saúl Ibarra Corretgé) + +* darwin,test: squelch EBUSY error on multicast join (Saúl Ibarra Corretgé) + +* build: update minimum cmake to 3.10 (Ben Noordhuis) + +* kqueue: use EVFILT_USER for async if available (Jameson Nash) + +* unix,win: fix off-by-one in uv_wtf8_to_utf16() (Ben Noordhuis) + +* doc: add scala-native-loop to LINKS.md (Julian A Avar C) + +* unix: fix build breakage on haiku, openbsd, etc (Jeffrey H. Johnson) + +* kqueue: lower overhead in uv__io_check_fd (Andy Pan) + +* doc: move cjihrig back to active maintainers (cjihrig) + +* build(deps): bump actions/checkout from 3 to 4 (dependabot[bot]) + +* unix,pipe: fix handling null buffer in uv_pipe_get{sock,peer}name (Saúl + Ibarra Corretgé) + +* unix,win: harmonize buffer checking (Saúl Ibarra Corretgé) + +* unix,win: add support for detached threads (Juan José Arboleda) + +* src: add uv_thread_set/getname() methods (Santiago Gimeno) + +* build: fix qemu builds (Ben Noordhuis) + +* win: drop support for windows 8 (Ben Noordhuis) + +* linux: fix uv_cpu_info() arm cpu model detection (Ben Noordhuis) + +* linux: always use io_uring for epoll batching (Ben Noordhuis) + +* doc: clarify repeating timer behavior more (Ben Noordhuis) + +* unix,win: handle nbufs=0 in uv_udp_try_send (Ben Noordhuis) + +* win: use GetQueuedCompletionStatusEx directly (Saúl Ibarra Corretgé) + +* win: enable uv_thread_{get,set}name on MinGW (Saúl Ibarra Corretgé) + +* win: drop support for the legacy MinGW (Saúl Ibarra Corretgé) + +* win,fs: get (most) fstat when no permission (Jameson Nash) + +* win: plug uv_fs_event_start memory leak (amcgoogan) + +* test: address FreeBSD kernel bug causing NULL path in fsevents (Juan José + Arboleda) + +* unix: refactor udp sendmsg code (Ben Noordhuis) + +* unix,win: add uv_udp_try_send2 (Ben Noordhuis) + +* test: fix flaky flaky udp_mmsg test (Juan José Arboleda) + +* build: enable fdsan in Android (Juan José Arboleda) + +* test: fix udp-multicast-join for FreeBSD (Juan José Arboleda) + +* win: fix leak processing fs event (Saúl Ibarra Corretgé) + +* src: set a default thread name for workers (Rafael Gonzaga) + +* misc: implement uv_getrusage_thread (Juan José Arboleda) + + +2024.10.18, Version 1.49.2 (Stable), e1095c7a4373ce00cd8874d8e820de5afb25776e Changes since version 1.49.1: diff --git a/deps/uv/LINKS.md b/deps/uv/LINKS.md index 3e5800747bc7dd..743935cebb8532 100644 --- a/deps/uv/LINKS.md +++ b/deps/uv/LINKS.md @@ -37,6 +37,7 @@ * [Pixie-io](https://github.com/pixie-io/pixie): Open-source observability tool for Kubernetes applications. 
* [potion](https://github.com/perl11/potion)/[p2](https://github.com/perl11/p2): runtime * [racer](https://libraries.io/rubygems/racer): Ruby web server written as an C extension +* [scala-native-loop](https://github.com/scala-native/scala-native-loop): Extensible event loop and async-oriented IO for Scala Native; powered by libuv * [Socket Runtime](https://sockets.sh): A runtime for creating native cross-platform software on mobile and desktop using HTML, CSS, and JavaScript * [spider-gazelle](https://github.com/cotag/spider-gazelle): Ruby web server using libuv bindings * [Suave](http://suave.io/): A simple web development F# library providing a lightweight web server and a set of combinators to manipulate route flow and task composition diff --git a/deps/uv/MAINTAINERS.md b/deps/uv/MAINTAINERS.md index 41c60cb383cfbe..ff8be88b7b7cd5 100644 --- a/deps/uv/MAINTAINERS.md +++ b/deps/uv/MAINTAINERS.md @@ -4,6 +4,9 @@ libuv is currently managed by the following individuals: * **Ben Noordhuis** ([@bnoordhuis](https://github.com/bnoordhuis)) - GPG key: D77B 1E34 243F BAF0 5F8E 9CC3 4F55 C8C8 46AB 89B9 (pubkey-bnoordhuis) +* **Colin Ihrig** ([@cjihrig](https://github.com/cjihrig)) + - GPG key: 94AE 3667 5C46 4D64 BAFA 68DD 7434 390B DBE9 B9C5 (pubkey-cjihrig) + - GPG key: 5735 3E0D BDAA A7E8 39B6 6A1A FF47 D5E4 AD8B 4FDC (pubkey-cjihrig-kb) * **Jameson Nash** ([@vtjnash](https://github.com/vtjnash)) - GPG key: AEAD 0A4B 6867 6775 1A0E 4AEF 34A2 5FB1 2824 6514 (pubkey-vtjnash) - GPG key: CFBB 9CA9 A5BE AFD7 0E2B 3C5A 79A6 7C55 A367 9C8B (pubkey2022-vtjnash) @@ -24,9 +27,6 @@ libuv is currently managed by the following individuals: * **Anna Henningsen** ([@addaleax](https://github.com/addaleax)) * **Bartosz Sosnowski** ([@bzoz](https://github.com/bzoz)) * **Bert Belder** ([@piscisaureus](https://github.com/piscisaureus)) -* **Colin Ihrig** ([@cjihrig](https://github.com/cjihrig)) - - GPG key: 94AE 3667 5C46 4D64 BAFA 68DD 7434 390B DBE9 B9C5 (pubkey-cjihrig) - - GPG key: 5735 3E0D BDAA A7E8 39B6 6A1A FF47 D5E4 AD8B 4FDC (pubkey-cjihrig-kb) * **Fedor Indutny** ([@indutny](https://github.com/indutny)) - GPG key: AF2E EA41 EC34 47BF DD86 FED9 D706 3CCE 19B7 E890 (pubkey-indutny) * **Imran Iqbal** ([@imran-iq](https://github.com/imran-iq)) diff --git a/deps/uv/Makefile.am b/deps/uv/Makefile.am index f85a41316c8a43..9b9e6be7178b22 100644 --- a/deps/uv/Makefile.am +++ b/deps/uv/Makefile.am @@ -59,7 +59,7 @@ if WINNT uvinclude_HEADERS += include/uv/win.h include/uv/tree.h AM_CPPFLAGS += -I$(top_srcdir)/src/win \ -DWIN32_LEAN_AND_MEAN \ - -D_WIN32_WINNT=0x0602 + -D_WIN32_WINNT=0x0A00 libuv_la_SOURCES += src/win/async.c \ src/win/atomicops-inl.h \ src/win/core.c \ @@ -294,6 +294,7 @@ test_run_tests_SOURCES = test/blackhole-server.c \ test/test-thread-equal.c \ test/test-thread.c \ test/test-thread-affinity.c \ + test/test-thread-name.c \ test/test-thread-priority.c \ test/test-threadpool-cancel.c \ test/test-threadpool.c \ diff --git a/deps/uv/SUPPORTED_PLATFORMS.md b/deps/uv/SUPPORTED_PLATFORMS.md index 8a435d2592e47f..9597801b919687 100644 --- a/deps/uv/SUPPORTED_PLATFORMS.md +++ b/deps/uv/SUPPORTED_PLATFORMS.md @@ -4,14 +4,14 @@ |---|---|---|---| | GNU/Linux | Tier 1 | Linux >= 3.10 with glibc >= 2.17 | | | macOS | Tier 1 | macOS >= 11 | Currently supported macOS releases | -| Windows | Tier 1 | >= Windows 8 | VS 2015 and 
later are supported | +| Windows | Tier 1 | >= Windows 10 | VS 2015 and later are supported | | FreeBSD | Tier 2 | >= 12 | | | AIX | Tier 2 | >= 6 | Maintainers: @libuv/aix | | IBM i | Tier 2 | >= IBM i 7.2 | Maintainers: @libuv/ibmi | | z/OS | Tier 2 | >= V2R2 | Maintainers: @libuv/zos | | Linux with musl | Tier 2 | musl >= 1.0 | | | Android | Tier 3 | NDK >= r15b | Android 7.0, `-DANDROID_PLATFORM=android-24` | -| MinGW | Tier 3 | MinGW32 and MinGW-w64 | | +| MinGW | Tier 3 | MinGW-w64 | | | SunOS | Tier 3 | Solaris 121 and later | | | Other | Tier 3 | N/A | | diff --git a/deps/uv/configure.ac b/deps/uv/configure.ac index 98c59363026f86..fc8316b8e8fa75 100644 --- a/deps/uv/configure.ac +++ b/deps/uv/configure.ac @@ -13,7 +13,7 @@ # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. AC_PREREQ(2.57) -AC_INIT([libuv], [1.49.2], [https://github.com/libuv/libuv/issues]) +AC_INIT([libuv], [1.50.0], [https://github.com/libuv/libuv/issues]) AC_CONFIG_MACRO_DIR([m4]) m4_include([m4/libuv-extra-automake-flags.m4]) m4_include([m4/as_case.m4]) diff --git a/deps/uv/docs/src/fs_event.rst b/deps/uv/docs/src/fs_event.rst index 983db1a9d5608a..bfdecdd7329cd2 100644 --- a/deps/uv/docs/src/fs_event.rst +++ b/deps/uv/docs/src/fs_event.rst @@ -47,6 +47,11 @@ Data types The `events` parameter is an ORed mask of :c:enum:`uv_fs_event` elements. +.. note:: + For FreeBSD path could sometimes be `NULL` due to a kernel bug. + + .. _Reference: https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=197695 + .. c:enum:: uv_fs_event Event types that :c:type:`uv_fs_event_t` handles monitor. diff --git a/deps/uv/docs/src/misc.rst b/deps/uv/docs/src/misc.rst index 61883b7e21e527..db95e2dde83ea1 100644 --- a/deps/uv/docs/src/misc.rst +++ b/deps/uv/docs/src/misc.rst @@ -360,6 +360,17 @@ API On Windows not all fields are set, the unsupported fields are filled with zeroes. See :c:type:`uv_rusage_t` for more details. +.. c:function:: int uv_getrusage_thread(uv_rusage_t* rusage) + + Gets the resource usage measures for the calling thread. + + .. versionadded:: 1.50.0 + + .. note:: + Not supported on all platforms. May return `UV_ENOTSUP`. + On macOS and Windows not all fields are set, the unsupported fields are filled with zeroes. + See :c:type:`uv_rusage_t` for more details. + .. c:function:: uv_pid_t uv_os_getpid(void) Returns the current process ID. diff --git a/deps/uv/docs/src/threading.rst b/deps/uv/docs/src/threading.rst index 883218fa829ccb..f40cf0a33c8121 100644 --- a/deps/uv/docs/src/threading.rst +++ b/deps/uv/docs/src/threading.rst @@ -78,6 +78,14 @@ Threads .. versionchanged:: 1.4.1 returns a UV_E* error code on failure +.. c:function:: int uv_thread_detach(uv_thread_t* tid) + + Detaches a thread. Detached threads automatically release their + resources upon termination, eliminating the need for the application to + call `uv_thread_join`. + + .. versionadded:: 1.50.0 + .. c:function:: int uv_thread_create_ex(uv_thread_t* tid, const uv_thread_options_t* params, uv_thread_cb entry, void* arg) Like :c:func:`uv_thread_create`, but additionally specifies options for creating a new thread. @@ -132,6 +140,23 @@ Threads .. c:function:: int uv_thread_join(uv_thread_t *tid) .. c:function:: int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) +.. c:function:: int uv_thread_setname(const char* name) + + Sets the name of the current thread. 
Different platforms define different limits on the max number of characters + a thread name can be: Linux, IBM i (16), macOS (64), Windows (32767), and NetBSD (32), etc. `uv_thread_setname()` + will truncate it in case `name` is larger than the limit of the platform. + + .. versionadded:: 1.50.0 + +.. c:function:: int uv_thread_getname(uv_thread_t* tid, char* name, size_t* size) + + Gets the name of the thread specified by `tid`. The thread name is copied, with the trailing NUL, into the buffer + pointed to by `name`. The `size` parameter specifies the size of the buffer pointed to by `name`. + The buffer should be large enough to hold the name of the thread plus the trailing NUL, or it will be truncated to fit + with the trailing NUL. + + .. versionadded:: 1.50.0 + .. c:function:: int uv_thread_setpriority(uv_thread_t tid, int priority) If the function succeeds, the return value is 0. If the function fails, the return value is less than zero. diff --git a/deps/uv/docs/src/threadpool.rst b/deps/uv/docs/src/threadpool.rst index 7cfa797314ca48..05f31d2ccf30b8 100644 --- a/deps/uv/docs/src/threadpool.rst +++ b/deps/uv/docs/src/threadpool.rst @@ -17,6 +17,8 @@ is 1024). .. versionchanged:: 1.45.0 threads now have an 8 MB stack instead of the (sometimes too low) platform default. +.. versionchanged:: 1.50.0 threads now have a default name of libuv-worker. + The threadpool is global and shared across all event loops. When a particular function makes use of the threadpool (i.e. when using :c:func:`uv_queue_work`) libuv preallocates and initializes the maximum number of threads allowed by diff --git a/deps/uv/docs/src/timer.rst b/deps/uv/docs/src/timer.rst index 070fa79da9d6df..474c6b8c4cd4f6 100644 --- a/deps/uv/docs/src/timer.rst +++ b/deps/uv/docs/src/timer.rst @@ -6,6 +6,15 @@ Timer handles are used to schedule callbacks to be called in the future. +Timers are either single-shot or repeating. Repeating timers do not adjust +for overhead but are rearmed relative to the event loop's idea of "now". + +Libuv updates its idea of "now" right before executing timer callbacks, and +right after waking up from waiting for I/O. See also :c:func:`uv_update_time`. + +Example: a repeating timer with a 50 ms interval whose callback takes 17 ms +to complete, runs again 33 ms later. If other tasks take longer than 33 ms, +the timer callback runs as soon as possible. Data types ---------- @@ -64,11 +73,6 @@ API duration, and will follow normal timer semantics in the case of a time-slice overrun. - For example, if a 50ms repeating timer first runs for 17ms, it will be - scheduled to run again 33ms later. If other tasks consume more than the - 33ms following the first timer callback, then the callback will run as soon - as possible. - .. note:: If the repeat value is set from a timer callback it does not immediately take effect. If the timer was non-repeating before, it will have been stopped. If it was repeating, diff --git a/deps/uv/docs/src/udp.rst b/deps/uv/docs/src/udp.rst index 31f7f7fd71ff47..5f225e5cda4011 100644 --- a/deps/uv/docs/src/udp.rst +++ b/deps/uv/docs/src/udp.rst @@ -426,6 +426,20 @@ API .. versionchanged:: 1.27.0 added support for connected sockets +.. c:function:: int uv_udp_try_send2(uv_udp_t* handle, unsigned int count, uv_buf_t* bufs[/*count*/], unsigned int nbufs[/*count*/], struct sockaddr* addrs[/*count*/], unsigned int flags) + + Like :c:func:`uv_udp_try_send`, but can send multiple datagrams. 
+ Lightweight abstraction around :man:`sendmmsg(2)`, with a :man:`sendmsg(2)` + fallback loop for platforms that do not support the former. The handle must + be fully initialized; call c:func:`uv_udp_bind` first. + + :returns: >= 0: number of datagrams sent. Zero only if `count` was zero. + < 0: negative error code. Only if sending the first datagram fails, + otherwise returns a positive send count. ``UV_EAGAIN`` when datagrams + cannot be sent right now; fall back to :c:func:`uv_udp_send`. + + .. versionadded:: 1.50.0 + .. c:function:: int uv_udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloc_cb, uv_udp_recv_cb recv_cb) Prepare for receiving data. If the socket has not previously been bound diff --git a/deps/uv/include/uv.h b/deps/uv/include/uv.h index 9e450c5110fe57..f0ec376b607c05 100644 --- a/deps/uv/include/uv.h +++ b/deps/uv/include/uv.h @@ -157,6 +157,7 @@ struct uv__queue { XX(ESOCKTNOSUPPORT, "socket type not supported") \ XX(ENODATA, "no data available") \ XX(EUNATCH, "protocol driver not attached") \ + XX(ENOEXEC, "exec format error") \ #define UV_HANDLE_TYPE_MAP(XX) \ XX(ASYNC, async) \ @@ -775,6 +776,12 @@ UV_EXTERN int uv_udp_try_send(uv_udp_t* handle, const uv_buf_t bufs[], unsigned int nbufs, const struct sockaddr* addr); +UV_EXTERN int uv_udp_try_send2(uv_udp_t* handle, + unsigned int count, + uv_buf_t* bufs[/*count*/], + unsigned int nbufs[/*count*/], + struct sockaddr* addrs[/*count*/], + unsigned int flags); UV_EXTERN int uv_udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloc_cb, uv_udp_recv_cb recv_cb); @@ -1288,6 +1295,7 @@ typedef struct { } uv_rusage_t; UV_EXTERN int uv_getrusage(uv_rusage_t* rusage); +UV_EXTERN int uv_getrusage_thread(uv_rusage_t* rusage); UV_EXTERN int uv_os_homedir(char* buffer, size_t* size); UV_EXTERN int uv_os_tmpdir(char* buffer, size_t* size); @@ -1869,6 +1877,7 @@ UV_EXTERN int uv_gettimeofday(uv_timeval64_t* tv); typedef void (*uv_thread_cb)(void* arg); UV_EXTERN int uv_thread_create(uv_thread_t* tid, uv_thread_cb entry, void* arg); +UV_EXTERN int uv_thread_detach(uv_thread_t* tid); typedef enum { UV_THREAD_NO_FLAGS = 0x00, @@ -1898,6 +1907,9 @@ UV_EXTERN int uv_thread_getcpu(void); UV_EXTERN uv_thread_t uv_thread_self(void); UV_EXTERN int uv_thread_join(uv_thread_t *tid); UV_EXTERN int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2); +UV_EXTERN int uv_thread_setname(const char* name); +UV_EXTERN int uv_thread_getname(uv_thread_t* tid, char* name, size_t size); + /* The presence of these unions force similar struct layout. 
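The `uv_udp_try_send2()` API documented and declared above takes parallel arrays, one entry per datagram: a buffer vector, the vector's length, and a destination address. A minimal usage sketch, assuming libuv 1.50.0 headers and a hypothetical receiver at 127.0.0.1:9999:

```c
/* Sketch only: exercises the uv_udp_try_send2() API added in libuv 1.50.0.
 * The peer address 127.0.0.1:9999 is a placeholder for illustration. */
#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_udp_t handle;
  struct sockaddr_in local;
  struct sockaddr_in peer;

  uv_udp_init(loop, &handle);

  /* The handle must be fully initialized (bound) before try_send2. */
  uv_ip4_addr("0.0.0.0", 0, &local);
  uv_udp_bind(&handle, (const struct sockaddr*) &local, 0);
  uv_ip4_addr("127.0.0.1", 9999, &peer);

  uv_buf_t a = uv_buf_init("first", 5);
  uv_buf_t b = uv_buf_init("second", 6);

  /* Parallel arrays: one buffer vector, vector length and address per datagram. */
  uv_buf_t* bufs[2] = { &a, &b };
  unsigned int nbufs[2] = { 1, 1 };
  struct sockaddr* addrs[2] = {
    (struct sockaddr*) &peer,
    (struct sockaddr*) &peer,
  };

  int nsent = uv_udp_try_send2(&handle, 2, bufs, nbufs, addrs, 0);
  if (nsent < 0)
    fprintf(stderr, "uv_udp_try_send2: %s\n", uv_strerror(nsent)); /* e.g. UV_EAGAIN */
  else
    printf("datagrams sent: %d\n", nsent);

  uv_close((uv_handle_t*) &handle, NULL);
  uv_run(loop, UV_RUN_DEFAULT);
  return 0;
}
```

Per the documentation above, a negative return such as `UV_EAGAIN` means nothing was sent and the caller should fall back to `uv_udp_send()`.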
*/ #define XX(_, name) uv_ ## name ## _t name; diff --git a/deps/uv/include/uv/errno.h b/deps/uv/include/uv/errno.h index 127278ef916161..ac00778cfc59fb 100644 --- a/deps/uv/include/uv/errno.h +++ b/deps/uv/include/uv/errno.h @@ -474,4 +474,10 @@ # define UV__EUNATCH (-4023) #endif +#if defined(ENOEXEC) && !defined(_WIN32) +# define UV__ENOEXEC UV__ERR(ENOEXEC) +#else +# define UV__ENOEXEC (-4022) +#endif + #endif /* UV_ERRNO_H_ */ diff --git a/deps/uv/include/uv/unix.h b/deps/uv/include/uv/unix.h index 538f98b6c5d657..7c972026f688e8 100644 --- a/deps/uv/include/uv/unix.h +++ b/deps/uv/include/uv/unix.h @@ -271,7 +271,10 @@ typedef struct { #define UV_UDP_SEND_PRIVATE_FIELDS \ struct uv__queue queue; \ - struct sockaddr_storage addr; \ + union { \ + struct sockaddr addr; \ + struct sockaddr_storage storage; \ + } u; \ unsigned int nbufs; \ uv_buf_t* bufs; \ ssize_t status; \ diff --git a/deps/uv/include/uv/version.h b/deps/uv/include/uv/version.h index cfa7871322e690..76eb7d125fe468 100644 --- a/deps/uv/include/uv/version.h +++ b/deps/uv/include/uv/version.h @@ -31,8 +31,8 @@ */ #define UV_VERSION_MAJOR 1 -#define UV_VERSION_MINOR 49 -#define UV_VERSION_PATCH 2 +#define UV_VERSION_MINOR 50 +#define UV_VERSION_PATCH 0 #define UV_VERSION_IS_RELEASE 1 #define UV_VERSION_SUFFIX "" diff --git a/deps/uv/include/uv/win.h b/deps/uv/include/uv/win.h index 12ac53b4f217d2..58d10b8d07fa0b 100644 --- a/deps/uv/include/uv/win.h +++ b/deps/uv/include/uv/win.h @@ -20,7 +20,7 @@ */ #ifndef _WIN32_WINNT -# define _WIN32_WINNT 0x0600 +# define _WIN32_WINNT 0x0A00 #endif #if !defined(_SSIZE_T_) && !defined(_SSIZE_T_DEFINED) @@ -32,14 +32,6 @@ typedef intptr_t ssize_t; #include -#if defined(__MINGW32__) && !defined(__MINGW64_VERSION_MAJOR) -typedef struct pollfd { - SOCKET fd; - short events; - short revents; -} WSAPOLLFD, *PWSAPOLLFD, *LPWSAPOLLFD; -#endif - #ifndef LOCALE_INVARIANT # define LOCALE_INVARIANT 0x007f #endif diff --git a/deps/uv/src/fs-poll.c b/deps/uv/src/fs-poll.c index 1bac1c568e36ca..44f6263a5832ec 100644 --- a/deps/uv/src/fs-poll.c +++ b/deps/uv/src/fs-poll.c @@ -139,6 +139,9 @@ int uv_fs_poll_getpath(uv_fs_poll_t* handle, char* buffer, size_t* size) { struct poll_ctx* ctx; size_t required_len; + if (buffer == NULL || size == NULL || *size == 0) + return UV_EINVAL; + if (!uv_is_active((uv_handle_t*)handle)) { *size = 0; return UV_EINVAL; diff --git a/deps/uv/src/idna.c b/deps/uv/src/idna.c index efc5f283ce2ef9..5fcaf64c974a8a 100644 --- a/deps/uv/src/idna.c +++ b/deps/uv/src/idna.c @@ -393,7 +393,7 @@ void uv_wtf8_to_utf16(const char* source_ptr, code_point = uv__wtf8_decode1(&source_ptr); /* uv_wtf8_length_as_utf16 should have been called and checked first. 
*/ assert(code_point >= 0); - if (code_point > 0x10000) { + if (code_point > 0xFFFF) { assert(code_point < 0x10FFFF); *w_target++ = (((code_point - 0x10000) >> 10) + 0xD800); *w_target++ = ((code_point - 0x10000) & 0x3FF) + 0xDC00; diff --git a/deps/uv/src/threadpool.c b/deps/uv/src/threadpool.c index 45af50dcd04ea6..98d81cc7b6a4ed 100644 --- a/deps/uv/src/threadpool.c +++ b/deps/uv/src/threadpool.c @@ -59,6 +59,7 @@ static void worker(void* arg) { struct uv__queue* q; int is_slow_work; + uv_thread_setname("libuv-worker"); uv_sem_post((uv_sem_t*) arg); arg = NULL; diff --git a/deps/uv/src/unix/async.c b/deps/uv/src/unix/async.c index 0ff2669e30a628..8265a43ab47046 100644 --- a/deps/uv/src/unix/async.c +++ b/deps/uv/src/unix/async.c @@ -38,6 +38,34 @@ #include #endif +#if UV__KQUEUE_EVFILT_USER +static uv_once_t kqueue_runtime_detection_guard = UV_ONCE_INIT; +static int kqueue_evfilt_user_support = 1; + + +static void uv__kqueue_runtime_detection(void) { + int kq; + struct kevent ev[2]; + struct timespec timeout = {0, 0}; + + /* Perform the runtime detection to ensure that kqueue with + * EVFILT_USER actually works. */ + kq = kqueue(); + EV_SET(ev, UV__KQUEUE_EVFILT_USER_IDENT, EVFILT_USER, + EV_ADD | EV_CLEAR, 0, 0, 0); + EV_SET(ev + 1, UV__KQUEUE_EVFILT_USER_IDENT, EVFILT_USER, + 0, NOTE_TRIGGER, 0, 0); + if (kevent(kq, ev, 2, ev, 1, &timeout) < 1 || + ev[0].filter != EVFILT_USER || + ev[0].ident != UV__KQUEUE_EVFILT_USER_IDENT || + ev[0].flags & EV_ERROR) + /* If we wind up here, we can assume that EVFILT_USER is defined but + * broken on the current system. */ + kqueue_evfilt_user_support = 0; + uv__close(kq); +} +#endif + static void uv__async_send(uv_loop_t* loop); static int uv__async_start(uv_loop_t* loop); static void uv__cpu_relax(void); @@ -139,7 +167,11 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) { assert(w == &loop->async_io_watcher); +#if UV__KQUEUE_EVFILT_USER + for (;!kqueue_evfilt_user_support;) { +#else for (;;) { +#endif r = read(w->fd, buf, sizeof(buf)); if (r == sizeof(buf)) @@ -195,6 +227,17 @@ static void uv__async_send(uv_loop_t* loop) { len = sizeof(val); fd = loop->async_io_watcher.fd; /* eventfd */ } +#elif UV__KQUEUE_EVFILT_USER + struct kevent ev; + + if (kqueue_evfilt_user_support) { + fd = loop->async_io_watcher.fd; /* magic number for EVFILT_USER */ + EV_SET(&ev, fd, EVFILT_USER, 0, NOTE_TRIGGER, 0, 0); + r = kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL); + if (r == 0) + return; + abort(); + } #endif do @@ -215,6 +258,9 @@ static void uv__async_send(uv_loop_t* loop) { static int uv__async_start(uv_loop_t* loop) { int pipefd[2]; int err; +#if UV__KQUEUE_EVFILT_USER + struct kevent ev; +#endif if (loop->async_io_watcher.fd != -1) return 0; @@ -226,6 +272,36 @@ static int uv__async_start(uv_loop_t* loop) { pipefd[0] = err; pipefd[1] = -1; +#elif UV__KQUEUE_EVFILT_USER + uv_once(&kqueue_runtime_detection_guard, uv__kqueue_runtime_detection); + if (kqueue_evfilt_user_support) { + /* In order not to break the generic pattern of I/O polling, a valid + * file descriptor is required to take up a room in loop->watchers, + * thus we create one for that, but this fd will not be actually used, + * it's just a placeholder and magic number which is going to be closed + * during the cleanup, as other FDs. */ + err = uv__open_cloexec("/dev/null", O_RDONLY); + if (err < 0) + return err; + + pipefd[0] = err; + pipefd[1] = -1; + + /* When using EVFILT_USER event to wake up the kqueue, this event must be + * registered beforehand. 
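Back to the `uv_wtf8_to_utf16()` change above: the old `> 0x10000` test skipped code point U+10000, which already needs a UTF-16 surrogate pair, so the boundary is now `> 0xFFFF`. A minimal standalone sketch of the same arithmetic:

```c
/* Illustration of the surrogate-pair boundary behind the uv_wtf8_to_utf16()
 * fix: U+10000 itself already needs two UTF-16 code units, so the
 * supplementary-plane test must be `> 0xFFFF`, not `> 0x10000`. */
#include <assert.h>
#include <stdint.h>

int main(void) {
  uint32_t code_point = 0x10000;  /* the case the old check missed */
  uint16_t hi = (uint16_t) (((code_point - 0x10000) >> 10) + 0xD800);
  uint16_t lo = (uint16_t) (((code_point - 0x10000) & 0x3FF) + 0xDC00);
  assert(hi == 0xD800 && lo == 0xDC00);  /* U+10000 => D800 DC00 */
  return 0;
}
```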
Otherwise, calling kevent() to issue an + * unregistered EVFILT_USER event will get an ENOENT. + * Since uv__async_send() may happen before uv__io_poll() with multi-threads, + * we can't defer this registration of EVFILT_USER event as we did for other + * events, but must perform it right away. */ + EV_SET(&ev, err, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, 0); + err = kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL); + if (err < 0) + return UV__ERR(errno); + } else { + err = uv__make_pipe(pipefd, UV_NONBLOCK_PIPE); + if (err < 0) + return err; + } #else err = uv__make_pipe(pipefd, UV_NONBLOCK_PIPE); if (err < 0) @@ -236,6 +312,13 @@ static int uv__async_start(uv_loop_t* loop) { uv__io_start(loop, &loop->async_io_watcher, POLLIN); loop->async_wfd = pipefd[1]; +#if UV__KQUEUE_EVFILT_USER + /* Prevent the EVFILT_USER event from being added to kqueue redundantly + * and mistakenly later in uv__io_poll(). */ + if (kqueue_evfilt_user_support) + loop->async_io_watcher.events = loop->async_io_watcher.pevents; +#endif + return 0; } diff --git a/deps/uv/src/unix/core.c b/deps/uv/src/unix/core.c index 0c52ccf2ad7b2d..61cbc0d027f04a 100644 --- a/deps/uv/src/unix/core.c +++ b/deps/uv/src/unix/core.c @@ -52,6 +52,8 @@ #endif #if defined(__APPLE__) +# include +# include # include # include #endif /* defined(__APPLE__) */ @@ -751,7 +753,7 @@ ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) { int uv_cwd(char* buffer, size_t* size) { char scratch[1 + UV__PATH_MAX]; - if (buffer == NULL || size == NULL) + if (buffer == NULL || size == NULL || *size == 0) return UV_EINVAL; /* Try to read directly into the user's buffer first... */ @@ -999,10 +1001,10 @@ int uv__fd_exists(uv_loop_t* loop, int fd) { } -int uv_getrusage(uv_rusage_t* rusage) { +static int uv__getrusage(int who, uv_rusage_t* rusage) { struct rusage usage; - if (getrusage(RUSAGE_SELF, &usage)) + if (getrusage(who, &usage)) return UV__ERR(errno); rusage->ru_utime.tv_sec = usage.ru_utime.tv_sec; @@ -1041,6 +1043,48 @@ int uv_getrusage(uv_rusage_t* rusage) { } +int uv_getrusage(uv_rusage_t* rusage) { + return uv__getrusage(RUSAGE_SELF, rusage); +} + + +int uv_getrusage_thread(uv_rusage_t* rusage) { +#if defined(__APPLE__) + mach_msg_type_number_t count; + thread_basic_info_data_t info; + kern_return_t kr; + thread_t thread; + + thread = mach_thread_self(); + count = THREAD_BASIC_INFO_COUNT; + kr = thread_info(thread, + THREAD_BASIC_INFO, + (thread_info_t)&info, + &count); + + if (kr != KERN_SUCCESS) { + mach_port_deallocate(mach_task_self(), thread); + return UV_EINVAL; + } + + memset(rusage, 0, sizeof(*rusage)); + + rusage->ru_utime.tv_sec = info.user_time.seconds; + rusage->ru_utime.tv_usec = info.user_time.microseconds; + rusage->ru_stime.tv_sec = info.system_time.seconds; + rusage->ru_stime.tv_usec = info.system_time.microseconds; + + mach_port_deallocate(mach_task_self(), thread); + + return 0; + +#elif defined(RUSAGE_THREAD) + return uv__getrusage(RUSAGE_THREAD, rusage); +#endif /* defined(__APPLE__) */ + return UV_ENOTSUP; +} + + int uv__open_cloexec(const char* path, int flags) { #if defined(O_CLOEXEC) int fd; diff --git a/deps/uv/src/unix/darwin-proctitle.c b/deps/uv/src/unix/darwin-proctitle.c index 5288083ef04fd7..5e5642972a4df6 100644 --- a/deps/uv/src/unix/darwin-proctitle.c +++ b/deps/uv/src/unix/darwin-proctitle.c @@ -33,25 +33,9 @@ #include "darwin-stub.h" #endif - -static int uv__pthread_setname_np(const char* name) { - char namebuf[64]; /* MAXTHREADNAMESIZE */ - int err; - - strncpy(namebuf, name, sizeof(namebuf) - 1); - 
namebuf[sizeof(namebuf) - 1] = '\0'; - - err = pthread_setname_np(namebuf); - if (err) - return UV__ERR(err); - - return 0; -} - - int uv__set_process_title(const char* title) { #if TARGET_OS_IPHONE - return uv__pthread_setname_np(title); + return uv__thread_setname(title); #else CFStringRef (*pCFStringCreateWithCString)(CFAllocatorRef, const char*, @@ -177,7 +161,7 @@ int uv__set_process_title(const char* title) { goto out; } - uv__pthread_setname_np(title); /* Don't care if it fails. */ + uv__thread_setname(title); /* Don't care if it fails. */ err = 0; out: diff --git a/deps/uv/src/unix/internal.h b/deps/uv/src/unix/internal.h index 8d586b0b64a96c..b1d2b21756da36 100644 --- a/deps/uv/src/unix/internal.h +++ b/deps/uv/src/unix/internal.h @@ -35,6 +35,10 @@ #include #include #include +#if defined(__APPLE__) || defined(__DragonFly__) || \ + defined(__FreeBSD__) || defined(__NetBSD__) +#include +#endif #define uv__msan_unpoison(p, n) \ do { \ @@ -323,6 +327,8 @@ void uv__prepare_close(uv_prepare_t* handle); void uv__process_close(uv_process_t* handle); void uv__stream_close(uv_stream_t* handle); void uv__tcp_close(uv_tcp_t* handle); +int uv__thread_setname(const char* name); +int uv__thread_getname(uv_thread_t* tid, char* name, size_t size); size_t uv__thread_stack_size(void); void uv__udp_close(uv_udp_t* handle); void uv__udp_finish_close(uv_udp_t* handle); @@ -504,4 +510,22 @@ int uv__get_constrained_cpu(uv__cpu_constraint* constraint); #endif #endif +#if defined(EVFILT_USER) && defined(NOTE_TRIGGER) +/* EVFILT_USER is available since OS X 10.6, DragonFlyBSD 4.0, + * FreeBSD 8.1, and NetBSD 10.0. + * + * Note that even though EVFILT_USER is defined on the current system, + * it may still fail to work at runtime somehow. In that case, we fall + * back to pipe-based signaling. + */ +#define UV__KQUEUE_EVFILT_USER 1 +/* Magic number of identifier used for EVFILT_USER during runtime detection. + * There are no Google hits for this number when I create it. That way, + * people will be directed here if this number gets printed due to some + * kqueue error and they google for help. 
*/ +#define UV__KQUEUE_EVFILT_USER_IDENT 0x1e7e7711 +#else +#define UV__KQUEUE_EVFILT_USER 0 +#endif + #endif /* UV_UNIX_INTERNAL_H_ */ diff --git a/deps/uv/src/unix/kqueue.c b/deps/uv/src/unix/kqueue.c index 66aa166f053f52..e0166c344b05c4 100644 --- a/deps/uv/src/unix/kqueue.c +++ b/deps/uv/src/unix/kqueue.c @@ -97,8 +97,7 @@ int uv__io_fork(uv_loop_t* loop) { int uv__io_check_fd(uv_loop_t* loop, int fd) { - struct kevent ev; - int rc; + struct kevent ev[2]; struct stat sb; #ifdef __APPLE__ char path[MAXPATHLEN]; @@ -133,17 +132,12 @@ int uv__io_check_fd(uv_loop_t* loop, int fd) { } #endif - rc = 0; - EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, 0); - if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL)) - rc = UV__ERR(errno); - - EV_SET(&ev, fd, EVFILT_READ, EV_DELETE, 0, 0, 0); - if (rc == 0) - if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL)) - abort(); + EV_SET(ev, fd, EVFILT_READ, EV_ADD, 0, 0, 0); + EV_SET(ev + 1, fd, EVFILT_READ, EV_DELETE, 0, 0, 0); + if (kevent(loop->backend_fd, ev, 2, NULL, 0, NULL)) + return UV__ERR(errno); - return rc; + return 0; } @@ -367,6 +361,17 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { continue; } +#if UV__KQUEUE_EVFILT_USER + if (ev->filter == EVFILT_USER) { + w = &loop->async_io_watcher; + assert(fd == w->fd); + uv__metrics_update_idle_time(loop); + w->cb(loop, w, w->events); + nevents++; + continue; + } +#endif + if (ev->filter == EVFILT_VNODE) { assert(w->events == POLLIN); assert(w->pevents == POLLIN); diff --git a/deps/uv/src/unix/linux.c b/deps/uv/src/unix/linux.c index 857a4ef8a6686f..763f5dd5917b44 100644 --- a/deps/uv/src/unix/linux.c +++ b/deps/uv/src/unix/linux.c @@ -455,7 +455,7 @@ int uv__io_uring_register(int fd, unsigned opcode, void* arg, unsigned nargs) { } -static int uv__use_io_uring(void) { +static int uv__use_io_uring(uint32_t flags) { #if defined(__ANDROID_API__) return 0; /* Possibly available but blocked by seccomp. */ #elif defined(__arm__) && __SIZEOF_POINTER__ == 4 @@ -470,25 +470,27 @@ static int uv__use_io_uring(void) { char* val; int use; - use = atomic_load_explicit(&use_io_uring, memory_order_relaxed); - - if (use == 0) { - use = uv__kernel_version() >= #if defined(__hppa__) - /* io_uring first supported on parisc in 6.1, functional in .51 */ - /* https://lore.kernel.org/all/cb912694-b1fe-dbb0-4d8c-d608f3526905@gmx.de/ */ - /* 6.1.51 */ 0x060133 -#else - /* Older kernels have a bug where the sqpoll thread uses 100% CPU. */ - /* 5.10.186 */ 0x050ABA + /* io_uring first supported on parisc in 6.1, functional in .51 + * https://lore.kernel.org/all/cb912694-b1fe-dbb0-4d8c-d608f3526905@gmx.de/ + */ + if (uv__kernel_version() < /*6.1.51*/0x060133) + return 0; #endif - ? 1 : -1; - /* But users can still enable it if they so desire. */ - val = getenv("UV_USE_IO_URING"); - if (val != NULL) - use = atoi(val) ? 1 : -1; + /* SQPOLL is all kinds of buggy but epoll batching should work fine. */ + if (0 == (flags & UV__IORING_SETUP_SQPOLL)) + return 1; + + /* Older kernels have a bug where the sqpoll thread uses 100% CPU. */ + if (uv__kernel_version() < /*5.10.186*/0x050ABA) + return 0; + + use = atomic_load_explicit(&use_io_uring, memory_order_relaxed); + if (use == 0) { + val = getenv("UV_USE_IO_URING"); + use = val != NULL && atoi(val) > 0 ? 
1 : -1; atomic_store_explicit(&use_io_uring, use, memory_order_relaxed); } @@ -518,7 +520,7 @@ static void uv__iou_init(int epollfd, sq = MAP_FAILED; sqe = MAP_FAILED; - if (!uv__use_io_uring()) + if (!uv__use_io_uring(flags)) return; kernel_version = uv__kernel_version(); @@ -766,14 +768,13 @@ static struct uv__io_uring_sqe* uv__iou_get_sqe(struct uv__iou* iou, */ if (iou->ringfd == -2) { /* By default, the SQPOLL is not created. Enable only if the loop is - * configured with UV_LOOP_USE_IO_URING_SQPOLL. + * configured with UV_LOOP_USE_IO_URING_SQPOLL and the UV_USE_IO_URING + * environment variable is unset or a positive number. */ - if ((loop->flags & UV_LOOP_ENABLE_IO_URING_SQPOLL) == 0) { - iou->ringfd = -1; - return NULL; - } + if (loop->flags & UV_LOOP_ENABLE_IO_URING_SQPOLL) + if (uv__use_io_uring(UV__IORING_SETUP_SQPOLL)) + uv__iou_init(loop->backend_fd, iou, 64, UV__IORING_SETUP_SQPOLL); - uv__iou_init(loop->backend_fd, iou, 64, UV__IORING_SETUP_SQPOLL); if (iou->ringfd == -2) iou->ringfd = -1; /* "failed" */ } @@ -1713,16 +1714,22 @@ int uv_uptime(double* uptime) { int uv_cpu_info(uv_cpu_info_t** ci, int* count) { #if defined(__PPC__) static const char model_marker[] = "cpu\t\t: "; + static const char model_marker2[] = ""; #elif defined(__arm__) - static const char model_marker[] = "Processor\t: "; + static const char model_marker[] = "model name\t: "; + static const char model_marker2[] = "Processor\t: "; #elif defined(__aarch64__) static const char model_marker[] = "CPU part\t: "; + static const char model_marker2[] = ""; #elif defined(__mips__) static const char model_marker[] = "cpu model\t\t: "; + static const char model_marker2[] = ""; #elif defined(__loongarch__) static const char model_marker[] = "cpu family\t\t: "; + static const char model_marker2[] = ""; #else static const char model_marker[] = "model name\t: "; + static const char model_marker2[] = ""; #endif static const char parts[] = #ifdef __aarch64__ @@ -1821,14 +1828,22 @@ int uv_cpu_info(uv_cpu_info_t** ci, int* count) { if (1 != fscanf(fp, "processor\t: %u\n", &cpu)) break; /* Parse error. */ - found = 0; - while (!found && fgets(buf, sizeof(buf), fp)) - found = !strncmp(buf, model_marker, sizeof(model_marker) - 1); + while (fgets(buf, sizeof(buf), fp)) { + if (!strncmp(buf, model_marker, sizeof(model_marker) - 1)) { + p = buf + sizeof(model_marker) - 1; + goto parts; + } + if (!*model_marker2) + continue; + if (!strncmp(buf, model_marker2, sizeof(model_marker2) - 1)) { + p = buf + sizeof(model_marker2) - 1; + goto parts; + } + } - if (!found) - goto next; + goto next; /* Not found. */ - p = buf + sizeof(model_marker) - 1; +parts: n = (int) strcspn(p, "\n"); /* arm64: translate CPU part code to model name. 
*/ diff --git a/deps/uv/src/unix/pipe.c b/deps/uv/src/unix/pipe.c index 1f9acfac41e9c5..bd57b17fb0367a 100644 --- a/deps/uv/src/unix/pipe.c +++ b/deps/uv/src/unix/pipe.c @@ -360,6 +360,9 @@ static int uv__pipe_getsockpeername(const uv_pipe_t* handle, char* p; int err; + if (buffer == NULL || size == NULL || *size == 0) + return UV_EINVAL; + addrlen = sizeof(sa); memset(&sa, 0, addrlen); err = uv__getsockpeername((const uv_handle_t*) handle, @@ -444,7 +447,7 @@ uv_handle_type uv_pipe_pending_type(uv_pipe_t* handle) { int uv_pipe_chmod(uv_pipe_t* handle, int mode) { unsigned desired_mode; struct stat pipe_stat; - char* name_buffer; + char name_buffer[1 + UV__PATH_MAX]; size_t name_len; int r; @@ -457,26 +460,14 @@ int uv_pipe_chmod(uv_pipe_t* handle, int mode) { return UV_EINVAL; /* Unfortunately fchmod does not work on all platforms, we will use chmod. */ - name_len = 0; - r = uv_pipe_getsockname(handle, NULL, &name_len); - if (r != UV_ENOBUFS) - return r; - - name_buffer = uv__malloc(name_len); - if (name_buffer == NULL) - return UV_ENOMEM; - + name_len = sizeof(name_buffer); r = uv_pipe_getsockname(handle, name_buffer, &name_len); - if (r != 0) { - uv__free(name_buffer); + if (r != 0) return r; - } /* stat must be used as fstat has a bug on Darwin */ - if (uv__stat(name_buffer, &pipe_stat) == -1) { - uv__free(name_buffer); - return -errno; - } + if (uv__stat(name_buffer, &pipe_stat) == -1) + return UV__ERR(errno); desired_mode = 0; if (mode & UV_READABLE) @@ -485,15 +476,12 @@ int uv_pipe_chmod(uv_pipe_t* handle, int mode) { desired_mode |= S_IWUSR | S_IWGRP | S_IWOTH; /* Exit early if pipe already has desired mode. */ - if ((pipe_stat.st_mode & desired_mode) == desired_mode) { - uv__free(name_buffer); + if ((pipe_stat.st_mode & desired_mode) == desired_mode) return 0; - } pipe_stat.st_mode |= desired_mode; r = chmod(name_buffer, pipe_stat.st_mode); - uv__free(name_buffer); return r != -1 ? 
0 : UV__ERR(errno); } diff --git a/deps/uv/src/unix/thread.c b/deps/uv/src/unix/thread.c index f05e6fe0f7dd5a..e51c290466d08b 100644 --- a/deps/uv/src/unix/thread.c +++ b/deps/uv/src/unix/thread.c @@ -23,6 +23,9 @@ #include "internal.h" #include +#ifdef __OpenBSD__ +#include +#endif #include #include @@ -126,6 +129,12 @@ int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) { return uv_thread_create_ex(tid, ¶ms, entry, arg); } + +int uv_thread_detach(uv_thread_t *tid) { + return UV__ERR(pthread_detach(*tid)); +} + + int uv_thread_create_ex(uv_thread_t* tid, const uv_thread_options_t* params, void (*entry)(void *arg), @@ -291,6 +300,18 @@ int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) { return pthread_equal(*t1, *t2); } +int uv_thread_setname(const char* name) { + if (name == NULL) + return UV_EINVAL; + return uv__thread_setname(name); +} + +int uv_thread_getname(uv_thread_t* tid, char* name, size_t size) { + if (name == NULL || size == 0) + return UV_EINVAL; + + return uv__thread_getname(tid, name, size); +} int uv_mutex_init(uv_mutex_t* mutex) { #if defined(NDEBUG) || !defined(PTHREAD_MUTEX_ERRORCHECK) @@ -875,3 +896,80 @@ void uv_key_set(uv_key_t* key, void* value) { if (pthread_setspecific(*key, value)) abort(); } + +#if defined(_AIX) || defined(__MVS__) || defined(__PASE__) +int uv__thread_setname(const char* name) { + return UV_ENOSYS; +} +#elif defined(__APPLE__) +int uv__thread_setname(const char* name) { + char namebuf[UV_PTHREAD_MAX_NAMELEN_NP]; + strncpy(namebuf, name, sizeof(namebuf) - 1); + namebuf[sizeof(namebuf) - 1] = '\0'; + int err = pthread_setname_np(namebuf); + if (err) + return UV__ERR(errno); + return 0; +} +#elif defined(__NetBSD__) +int uv__thread_setname(const char* name) { + char namebuf[UV_PTHREAD_MAX_NAMELEN_NP]; + strncpy(namebuf, name, sizeof(namebuf) - 1); + namebuf[sizeof(namebuf) - 1] = '\0'; + return UV__ERR(pthread_setname_np(pthread_self(), "%s", namebuf)); +} +#elif defined(__OpenBSD__) +int uv__thread_setname(const char* name) { + char namebuf[UV_PTHREAD_MAX_NAMELEN_NP]; + strncpy(namebuf, name, sizeof(namebuf) - 1); + namebuf[sizeof(namebuf) - 1] = '\0'; + pthread_set_name_np(pthread_self(), namebuf); + return 0; +} +#else +int uv__thread_setname(const char* name) { + char namebuf[UV_PTHREAD_MAX_NAMELEN_NP]; + strncpy(namebuf, name, sizeof(namebuf) - 1); + namebuf[sizeof(namebuf) - 1] = '\0'; + return UV__ERR(pthread_setname_np(pthread_self(), namebuf)); +} +#endif + +#if (defined(__ANDROID_API__) && __ANDROID_API__ < 26) || \ + defined(_AIX) || \ + defined(__MVS__) || \ + defined(__PASE__) +int uv__thread_getname(uv_thread_t* tid, char* name, size_t size) { + return UV_ENOSYS; +} +#elif defined(__OpenBSD__) +int uv__thread_getname(uv_thread_t* tid, char* name, size_t size) { + char thread_name[UV_PTHREAD_MAX_NAMELEN_NP]; + pthread_get_name_np(*tid, thread_name, sizeof(thread_name)); + strncpy(name, thread_name, size - 1); + name[size - 1] = '\0'; + return 0; +} +#elif defined(__APPLE__) +int uv__thread_getname(uv_thread_t* tid, char* name, size_t size) { + char thread_name[UV_PTHREAD_MAX_NAMELEN_NP]; + if (pthread_getname_np(*tid, thread_name, sizeof(thread_name)) != 0) + return UV__ERR(errno); + + strncpy(name, thread_name, size - 1); + name[size - 1] = '\0'; + return 0; +} +#else +int uv__thread_getname(uv_thread_t* tid, char* name, size_t size) { + int r; + char thread_name[UV_PTHREAD_MAX_NAMELEN_NP]; + r = pthread_getname_np(*tid, thread_name, sizeof(thread_name)); + if (r != 0) + return UV__ERR(r); + + 
strncpy(name, thread_name, size - 1); + name[size - 1] = '\0'; + return 0; +} +#endif diff --git a/deps/uv/src/unix/udp.c b/deps/uv/src/unix/udp.c index f6640fc7231863..67c01f7dce8e18 100644 --- a/deps/uv/src/unix/udp.c +++ b/deps/uv/src/unix/udp.c @@ -47,6 +47,10 @@ static void uv__udp_sendmsg(uv_udp_t* handle); static int uv__udp_maybe_deferred_bind(uv_udp_t* handle, int domain, unsigned int flags); +static int uv__udp_sendmsg1(int fd, + const uv_buf_t* bufs, + unsigned int nbufs, + const struct sockaddr* addr); void uv__udp_close(uv_udp_t* handle) { @@ -282,169 +286,6 @@ static void uv__udp_recvmsg(uv_udp_t* handle) { && handle->recv_cb != NULL); } -static void uv__udp_sendmsg_one(uv_udp_t* handle, uv_udp_send_t* req) { - struct uv__queue* q; - struct msghdr h; - ssize_t size; - - for (;;) { - memset(&h, 0, sizeof h); - if (req->addr.ss_family == AF_UNSPEC) { - h.msg_name = NULL; - h.msg_namelen = 0; - } else { - h.msg_name = &req->addr; - if (req->addr.ss_family == AF_INET6) - h.msg_namelen = sizeof(struct sockaddr_in6); - else if (req->addr.ss_family == AF_INET) - h.msg_namelen = sizeof(struct sockaddr_in); - else if (req->addr.ss_family == AF_UNIX) - h.msg_namelen = sizeof(struct sockaddr_un); - else { - assert(0 && "unsupported address family"); - abort(); - } - } - h.msg_iov = (struct iovec*) req->bufs; - h.msg_iovlen = req->nbufs; - - do - size = sendmsg(handle->io_watcher.fd, &h, 0); - while (size == -1 && errno == EINTR); - - if (size == -1) - if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS) - return; - - req->status = (size == -1 ? UV__ERR(errno) : size); - - /* Sending a datagram is an atomic operation: either all data - * is written or nothing is (and EMSGSIZE is raised). That is - * why we don't handle partial writes. Just pop the request - * off the write queue and onto the completed queue, done. 
- */ - uv__queue_remove(&req->queue); - uv__queue_insert_tail(&handle->write_completed_queue, &req->queue); - uv__io_feed(handle->loop, &handle->io_watcher); - - if (uv__queue_empty(&handle->write_queue)) - return; - - q = uv__queue_head(&handle->write_queue); - req = uv__queue_data(q, uv_udp_send_t, queue); - } -} - -#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) -static void uv__udp_sendmsg_many(uv_udp_t* handle) { - uv_udp_send_t* req; - struct mmsghdr h[20]; - struct mmsghdr* p; - struct uv__queue* q; - ssize_t npkts; - size_t pkts; - size_t i; - -write_queue_drain: - for (pkts = 0, q = uv__queue_head(&handle->write_queue); - pkts < ARRAY_SIZE(h) && q != &handle->write_queue; - ++pkts, q = uv__queue_head(q)) { - req = uv__queue_data(q, uv_udp_send_t, queue); - - p = &h[pkts]; - memset(p, 0, sizeof(*p)); - if (req->addr.ss_family == AF_UNSPEC) { - p->msg_hdr.msg_name = NULL; - p->msg_hdr.msg_namelen = 0; - } else { - p->msg_hdr.msg_name = &req->addr; - if (req->addr.ss_family == AF_INET6) - p->msg_hdr.msg_namelen = sizeof(struct sockaddr_in6); - else if (req->addr.ss_family == AF_INET) - p->msg_hdr.msg_namelen = sizeof(struct sockaddr_in); - else if (req->addr.ss_family == AF_UNIX) - p->msg_hdr.msg_namelen = sizeof(struct sockaddr_un); - else { - assert(0 && "unsupported address family"); - abort(); - } - } - h[pkts].msg_hdr.msg_iov = (struct iovec*) req->bufs; - h[pkts].msg_hdr.msg_iovlen = req->nbufs; - } - -#if defined(__APPLE__) - do - npkts = sendmsg_x(handle->io_watcher.fd, h, pkts, MSG_DONTWAIT); - while (npkts == -1 && errno == EINTR); -#else - do - npkts = sendmmsg(handle->io_watcher.fd, h, pkts, 0); - while (npkts == -1 && errno == EINTR); -#endif - - if (npkts < 1) { - if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS) - return; - for (i = 0, q = uv__queue_head(&handle->write_queue); - i < pkts && q != &handle->write_queue; - ++i, q = uv__queue_head(&handle->write_queue)) { - req = uv__queue_data(q, uv_udp_send_t, queue); - req->status = UV__ERR(errno); - uv__queue_remove(&req->queue); - uv__queue_insert_tail(&handle->write_completed_queue, &req->queue); - } - uv__io_feed(handle->loop, &handle->io_watcher); - return; - } - - /* Safety: npkts known to be >0 below. Hence cast from ssize_t - * to size_t safe. - */ - for (i = 0, q = uv__queue_head(&handle->write_queue); - i < (size_t)npkts && q != &handle->write_queue; - ++i, q = uv__queue_head(&handle->write_queue)) { - req = uv__queue_data(q, uv_udp_send_t, queue); - req->status = req->bufs[0].len; - - /* Sending a datagram is an atomic operation: either all data - * is written or nothing is (and EMSGSIZE is raised). That is - * why we don't handle partial writes. Just pop the request - * off the write queue and onto the completed queue, done. 
- */ - uv__queue_remove(&req->queue); - uv__queue_insert_tail(&handle->write_completed_queue, &req->queue); - } - - /* couldn't batch everything, continue sending (jump to avoid stack growth) */ - if (!uv__queue_empty(&handle->write_queue)) - goto write_queue_drain; - - uv__io_feed(handle->loop, &handle->io_watcher); -} -#endif /* __linux__ || ____FreeBSD__ || __APPLE__ */ - -static void uv__udp_sendmsg(uv_udp_t* handle) { - struct uv__queue* q; - uv_udp_send_t* req; - - if (uv__queue_empty(&handle->write_queue)) - return; - - q = uv__queue_head(&handle->write_queue); - req = uv__queue_data(q, uv_udp_send_t, queue); - -#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) - /* Use sendmmsg() if this send request contains more than one datagram OR - * there is more than one send request (because that automatically implies - * there is more than one datagram.) - */ - if (req->nbufs != 1 || &handle->write_queue != uv__queue_next(&req->queue)) - return uv__udp_sendmsg_many(handle); -#endif - - return uv__udp_sendmsg_one(handle, req); -} /* On the BSDs, SO_REUSEPORT implies SO_REUSEADDR but with some additional * refinements for programs that use multicast. Therefore we preferentially @@ -743,11 +584,11 @@ int uv__udp_send(uv_udp_send_t* req, empty_queue = (handle->send_queue_count == 0); uv__req_init(handle->loop, req, UV_UDP_SEND); - assert(addrlen <= sizeof(req->addr)); + assert(addrlen <= sizeof(req->u.storage)); if (addr == NULL) - req->addr.ss_family = AF_UNSPEC; + req->u.storage.ss_family = AF_UNSPEC; else - memcpy(&req->addr, addr, addrlen); + memcpy(&req->u.storage, addr, addrlen); req->send_cb = send_cb; req->handle = handle; req->nbufs = nbufs; @@ -790,10 +631,9 @@ int uv__udp_try_send(uv_udp_t* handle, const struct sockaddr* addr, unsigned int addrlen) { int err; - struct msghdr h; - ssize_t size; - assert(nbufs > 0); + if (nbufs < 1) + return UV_EINVAL; /* already sending a message */ if (handle->send_queue_count != 0) @@ -807,24 +647,11 @@ int uv__udp_try_send(uv_udp_t* handle, assert(handle->flags & UV_HANDLE_UDP_CONNECTED); } - memset(&h, 0, sizeof h); - h.msg_name = (struct sockaddr*) addr; - h.msg_namelen = addrlen; - h.msg_iov = (struct iovec*) bufs; - h.msg_iovlen = nbufs; + err = uv__udp_sendmsg1(handle->io_watcher.fd, bufs, nbufs, addr); + if (err > 0) + return uv__count_bufs(bufs, nbufs); - do { - size = sendmsg(handle->io_watcher.fd, &h, 0); - } while (size == -1 && errno == EINTR); - - if (size == -1) { - if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS) - return UV_EAGAIN; - else - return UV__ERR(errno); - } - - return size; + return err; } @@ -1401,3 +1228,191 @@ int uv__udp_recv_stop(uv_udp_t* handle) { return 0; } + + +static int uv__udp_prep_pkt(struct msghdr* h, + const uv_buf_t* bufs, + const unsigned int nbufs, + const struct sockaddr* addr) { + memset(h, 0, sizeof(*h)); + h->msg_name = (void*) addr; + h->msg_iov = (void*) bufs; + h->msg_iovlen = nbufs; + if (addr == NULL) + return 0; + switch (addr->sa_family) { + case AF_INET: + h->msg_namelen = sizeof(struct sockaddr_in); + return 0; + case AF_INET6: + h->msg_namelen = sizeof(struct sockaddr_in6); + return 0; + case AF_UNIX: + h->msg_namelen = sizeof(struct sockaddr_un); + return 0; + case AF_UNSPEC: + h->msg_name = NULL; + return 0; + } + return UV_EINVAL; +} + + +static int uv__udp_sendmsg1(int fd, + const uv_buf_t* bufs, + unsigned int nbufs, + const struct sockaddr* addr) { + struct msghdr h; + int r; + + if ((r = uv__udp_prep_pkt(&h, bufs, nbufs, addr))) + return r; + + do + 
r = sendmsg(fd, &h, 0); + while (r == -1 && errno == EINTR); + + if (r < 0) { + r = UV__ERR(errno); + if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS) + r = UV_EAGAIN; + return r; + } + + /* UDP sockets don't EOF so we don't have to handle r=0 specially, + * that only happens when the input was a zero-sized buffer. + */ + return 1; +} + + +static int uv__udp_sendmsgv(int fd, + unsigned int count, + uv_buf_t* bufs[/*count*/], + unsigned int nbufs[/*count*/], + struct sockaddr* addrs[/*count*/]) { + unsigned int i; + int nsent; + int r; + + r = 0; + nsent = 0; + +#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) + if (count > 1) { + for (i = 0; i < count; /*empty*/) { + struct mmsghdr m[20]; + unsigned int n; + + for (n = 0; i < count && n < ARRAY_SIZE(m); i++, n++) + if ((r = uv__udp_prep_pkt(&m[n].msg_hdr, bufs[i], nbufs[i], addrs[i]))) + goto exit; + + do +#if defined(__APPLE__) + r = sendmsg_x(fd, m, n, MSG_DONTWAIT); +#else + r = sendmmsg(fd, m, n, 0); +#endif + while (r == -1 && errno == EINTR); + + if (r < 1) + goto exit; + + nsent += r; + i += r; + } + + goto exit; + } +#endif /* defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) */ + + for (i = 0; i < count; i++, nsent++) + if ((r = uv__udp_sendmsg1(fd, bufs[i], nbufs[i], addrs[i]))) + goto exit; /* goto to avoid unused label warning. */ + +exit: + + if (nsent > 0) + return nsent; + + if (r < 0) { + r = UV__ERR(errno); + if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS) + r = UV_EAGAIN; + } + + return r; +} + + +static void uv__udp_sendmsg(uv_udp_t* handle) { + static const int N = 20; + struct sockaddr* addrs[N]; + unsigned int nbufs[N]; + uv_buf_t* bufs[N]; + struct uv__queue* q; + uv_udp_send_t* req; + int n; + + if (uv__queue_empty(&handle->write_queue)) + return; + +again: + n = 0; + q = uv__queue_head(&handle->write_queue); + do { + req = uv__queue_data(q, uv_udp_send_t, queue); + addrs[n] = &req->u.addr; + nbufs[n] = req->nbufs; + bufs[n] = req->bufs; + q = uv__queue_next(q); + n++; + } while (n < N && q != &handle->write_queue); + + n = uv__udp_sendmsgv(handle->io_watcher.fd, n, bufs, nbufs, addrs); + while (n > 0) { + q = uv__queue_head(&handle->write_queue); + req = uv__queue_data(q, uv_udp_send_t, queue); + req->status = uv__count_bufs(req->bufs, req->nbufs); + uv__queue_remove(&req->queue); + uv__queue_insert_tail(&handle->write_completed_queue, &req->queue); + n--; + } + + if (n == 0) { + if (uv__queue_empty(&handle->write_queue)) + goto feed; + goto again; + } + + if (n == UV_EAGAIN) + return; + + /* Register the error against first request in queue because that + * is the request that uv__udp_sendmsgv tried but failed to send, + * because if it did send any requests, it won't return an error. 
+ */ + q = uv__queue_head(&handle->write_queue); + req = uv__queue_data(q, uv_udp_send_t, queue); + req->status = n; + uv__queue_remove(&req->queue); + uv__queue_insert_tail(&handle->write_completed_queue, &req->queue); +feed: + uv__io_feed(handle->loop, &handle->io_watcher); +} + + +int uv__udp_try_send2(uv_udp_t* handle, + unsigned int count, + uv_buf_t* bufs[/*count*/], + unsigned int nbufs[/*count*/], + struct sockaddr* addrs[/*count*/]) { + int fd; + + fd = handle->io_watcher.fd; + if (fd == -1) + return UV_EINVAL; + + return uv__udp_sendmsgv(fd, count, bufs, nbufs, addrs); +} diff --git a/deps/uv/src/uv-common.c b/deps/uv/src/uv-common.c index 2200fe3f0a41e2..60ff56b9dd7391 100644 --- a/deps/uv/src/uv-common.c +++ b/deps/uv/src/uv-common.c @@ -514,6 +514,25 @@ int uv_udp_try_send(uv_udp_t* handle, } +int uv_udp_try_send2(uv_udp_t* handle, + unsigned int count, + uv_buf_t* bufs[/*count*/], + unsigned int nbufs[/*count*/], + struct sockaddr* addrs[/*count*/], + unsigned int flags) { + if (count < 1) + return UV_EINVAL; + + if (flags != 0) + return UV_EINVAL; + + if (handle->send_queue_count > 0) + return UV_EAGAIN; + + return uv__udp_try_send2(handle, count, bufs, nbufs, addrs); +} + + int uv_udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloc_cb, uv_udp_recv_cb recv_cb) { @@ -644,6 +663,9 @@ int uv_send_buffer_size(uv_handle_t* handle, int *value) { int uv_fs_event_getpath(uv_fs_event_t* handle, char* buffer, size_t* size) { size_t required_len; + if (buffer == NULL || size == NULL || *size == 0) + return UV_EINVAL; + if (!uv__is_active(handle)) { *size = 0; return UV_EINVAL; diff --git a/deps/uv/src/uv-common.h b/deps/uv/src/uv-common.h index 4baede2e506ee1..372f0c4b3ac39e 100644 --- a/deps/uv/src/uv-common.h +++ b/deps/uv/src/uv-common.h @@ -191,6 +191,12 @@ int uv__udp_try_send(uv_udp_t* handle, const struct sockaddr* addr, unsigned int addrlen); +int uv__udp_try_send2(uv_udp_t* handle, + unsigned int count, + uv_buf_t* bufs[/*count*/], + unsigned int nbufs[/*count*/], + struct sockaddr* addrs[/*count*/]); + int uv__udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloccb, uv_udp_recv_cb recv_cb); @@ -428,4 +434,18 @@ struct uv__loop_internal_fields_s { #endif /* __linux__ */ }; +#if defined(_WIN32) +# define UV_PTHREAD_MAX_NAMELEN_NP 32767 +#elif defined(__APPLE__) +# define UV_PTHREAD_MAX_NAMELEN_NP 64 +#elif defined(__NetBSD__) || defined(__illumos__) +# define UV_PTHREAD_MAX_NAMELEN_NP PTHREAD_MAX_NAMELEN_NP +#elif defined (__linux__) +# define UV_PTHREAD_MAX_NAMELEN_NP 16 +#elif defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) +# define UV_PTHREAD_MAX_NAMELEN_NP (MAXCOMLEN + 1) +#else +# define UV_PTHREAD_MAX_NAMELEN_NP 16 +#endif + #endif /* UV_COMMON_H_ */ diff --git a/deps/uv/src/win/core.c b/deps/uv/src/win/core.c index e9885a0f1ff389..bc63b06673ac1a 100644 --- a/deps/uv/src/win/core.c +++ b/deps/uv/src/win/core.c @@ -423,97 +423,6 @@ int uv_backend_timeout(const uv_loop_t* loop) { } -static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) { - uv__loop_internal_fields_t* lfields; - DWORD bytes; - ULONG_PTR key; - OVERLAPPED* overlapped; - uv_req_t* req; - int repeat; - uint64_t timeout_time; - uint64_t user_timeout; - int reset_timeout; - - lfields = uv__get_internal_fields(loop); - timeout_time = loop->time + timeout; - - if (lfields->flags & UV_METRICS_IDLE_TIME) { - reset_timeout = 1; - user_timeout = timeout; - timeout = 0; - } else { - reset_timeout = 0; - } - - for (repeat = 0; ; repeat++) { - /* Only need to set the provider_entry_time if timeout 
!= 0. The function - * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME. - */ - if (timeout != 0) - uv__metrics_set_provider_entry_time(loop); - - /* Store the current timeout in a location that's globally accessible so - * other locations like uv__work_done() can determine whether the queue - * of events in the callback were waiting when poll was called. - */ - lfields->current_timeout = timeout; - - GetQueuedCompletionStatus(loop->iocp, - &bytes, - &key, - &overlapped, - timeout); - - if (reset_timeout != 0) { - if (overlapped && timeout == 0) - uv__metrics_inc_events_waiting(loop, 1); - timeout = user_timeout; - reset_timeout = 0; - } - - /* Placed here because on success the loop will break whether there is an - * empty package or not, or if GetQueuedCompletionStatus returned early then - * the timeout will be updated and the loop will run again. In either case - * the idle time will need to be updated. - */ - uv__metrics_update_idle_time(loop); - - if (overlapped) { - uv__metrics_inc_events(loop, 1); - - /* Package was dequeued */ - req = uv__overlapped_to_req(overlapped); - uv__insert_pending_req(loop, req); - - /* Some time might have passed waiting for I/O, - * so update the loop time here. - */ - uv_update_time(loop); - } else if (GetLastError() != WAIT_TIMEOUT) { - /* Serious error */ - uv_fatal_error(GetLastError(), "GetQueuedCompletionStatus"); - } else if (timeout > 0) { - /* GetQueuedCompletionStatus can occasionally return a little early. - * Make sure that the desired timeout target time is reached. - */ - uv_update_time(loop); - if (timeout_time > loop->time) { - timeout = (DWORD)(timeout_time - loop->time); - /* The first call to GetQueuedCompletionStatus should return very - * close to the target time and the second should reach it, but - * this is not stated in the documentation. To make sure a busy - * loop cannot happen, the timeout is increased exponentially - * starting on the third round. - */ - timeout += repeat ? (1 << (repeat - 1)) : 0; - continue; - } - } - break; - } -} - - static void uv__poll(uv_loop_t* loop, DWORD timeout) { uv__loop_internal_fields_t* lfields; BOOL success; @@ -553,12 +462,12 @@ static void uv__poll(uv_loop_t* loop, DWORD timeout) { */ lfields->current_timeout = timeout; - success = pGetQueuedCompletionStatusEx(loop->iocp, - overlappeds, - ARRAY_SIZE(overlappeds), - &count, - timeout, - FALSE); + success = GetQueuedCompletionStatusEx(loop->iocp, + overlappeds, + ARRAY_SIZE(overlappeds), + &count, + timeout, + FALSE); if (reset_timeout != 0) { timeout = user_timeout; @@ -566,7 +475,7 @@ static void uv__poll(uv_loop_t* loop, DWORD timeout) { } /* Placed here because on success the loop will break whether there is an - * empty package or not, or if pGetQueuedCompletionStatusEx returned early + * empty package or not, or if GetQueuedCompletionStatusEx returned early * then the timeout will be updated and the loop will run again. In either * case the idle time will need to be updated. */ @@ -647,10 +556,7 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) { uv__metrics_inc_loop_count(loop); - if (pGetQueuedCompletionStatusEx) - uv__poll(loop, timeout); - else - uv__poll_wine(loop, timeout); + uv__poll(loop, timeout); /* Process immediate callbacks (e.g. 
write_cb) a small fixed number of * times to avoid loop starvation.*/ diff --git a/deps/uv/src/win/fs-event.c b/deps/uv/src/win/fs-event.c index 7ab407e05345f9..1bbb8c52be2d82 100644 --- a/deps/uv/src/win/fs-event.c +++ b/deps/uv/src/win/fs-event.c @@ -253,6 +253,8 @@ int uv_fs_event_start(uv_fs_event_t* handle, } dir_to_watch = dir; + uv__free(short_path); + short_path = NULL; uv__free(pathw); pathw = NULL; } @@ -577,6 +579,8 @@ void uv__process_fs_event_req(uv_loop_t* loop, uv_req_t* req, info.DeletePending) { uv__convert_utf16_to_utf8(handle->dirw, -1, &filename); handle->cb(handle, filename, UV_RENAME, 0); + uv__free(filename); + filename = NULL; } else { handle->cb(handle, NULL, 0, uv_translate_sys_error(err)); } diff --git a/deps/uv/src/win/fs.c b/deps/uv/src/win/fs.c index f2215bb3082178..a4742aa2ec13fd 100644 --- a/deps/uv/src/win/fs.c +++ b/deps/uv/src/win/fs.c @@ -58,6 +58,19 @@ #define FILE_DISPOSITION_IGNORE_READONLY_ATTRIBUTE 0x0010 #endif /* FILE_DISPOSITION_IGNORE_READONLY_ATTRIBUTE */ +NTSTATUS uv__RtlUnicodeStringInit( + PUNICODE_STRING DestinationString, + PWSTR SourceString, + size_t SourceStringLen +) { + if (SourceStringLen > 0x7FFF) + return STATUS_INVALID_PARAMETER; + DestinationString->MaximumLength = DestinationString->Length = + SourceStringLen * sizeof(SourceString[0]); + DestinationString->Buffer = SourceString; + return STATUS_SUCCESS; +} + #define INIT(subtype) \ do { \ if (req == NULL) \ @@ -1689,12 +1702,12 @@ INLINE static fs__stat_path_return_t fs__stat_path(WCHAR* path, uv_stat_t* statbuf, int do_lstat) { FILE_STAT_BASIC_INFORMATION stat_info; - // Check if the new fast API is available. + /* Check if the new fast API is available. */ if (!pGetFileInformationByName) { return FS__STAT_PATH_TRY_SLOW; } - // Check if the API call fails. + /* Check if the API call fails. */ if (!pGetFileInformationByName(path, FileStatBasicByNameInfo, &stat_info, sizeof(stat_info))) { switch(GetLastError()) { @@ -1708,7 +1721,7 @@ INLINE static fs__stat_path_return_t fs__stat_path(WCHAR* path, return FS__STAT_PATH_TRY_SLOW; } - // A file handle is needed to get st_size for links. + /* A file handle is needed to get st_size for links. */ if ((stat_info.FileAttributes & FILE_ATTRIBUTE_REPARSE_POINT)) { return FS__STAT_PATH_TRY_SLOW; } @@ -1802,7 +1815,6 @@ INLINE static int fs__stat_handle(HANDLE handle, uv_stat_t* statbuf, * detect this failure and retry without do_lstat if appropriate. */ if (fs__readlink_handle(handle, NULL, &target_length) != 0) { - fs__stat_assign_statbuf(statbuf, stat_info, do_lstat); return -1; } stat_info.EndOfFile.QuadPart = target_length; @@ -1941,6 +1953,179 @@ INLINE static void fs__stat_prepare_path(WCHAR* pathw) { } } +INLINE static DWORD fs__stat_directory(WCHAR* path, uv_stat_t* statbuf, + int do_lstat, DWORD ret_error) { + HANDLE handle = INVALID_HANDLE_VALUE; + FILE_STAT_BASIC_INFORMATION stat_info; + FILE_ID_FULL_DIR_INFORMATION dir_info; + FILE_FS_VOLUME_INFORMATION volume_info; + FILE_FS_DEVICE_INFORMATION device_info; + IO_STATUS_BLOCK io_status; + NTSTATUS nt_status; + WCHAR* path_dirpath = NULL; + WCHAR* path_filename = NULL; + UNICODE_STRING FileMask; + size_t len; + size_t split; + WCHAR splitchar; + int includes_name; + + /* AKA strtok or wcscspn, in reverse. */ + len = wcslen(path); + split = len; + + includes_name = 0; + while (split > 0 && path[split - 1] != L'\\' && path[split - 1] != L'/' && + path[split - 1] != L':') { + /* check if the path contains a character other than /,\,:,. 
*/ + if (path[split-1] != '.') { + includes_name = 1; + } + split--; + } + /* If the path is a relative path with a file name or a folder name */ + if (split == 0 && includes_name) { + path_dirpath = L"."; + /* If there is a slash or a backslash */ + } else if (path[split - 1] == L'\\' || path[split - 1] == L'/') { + path_dirpath = path; + /* If there is no filename, consider it as a relative folder path */ + if (!includes_name) { + split = len; + /* Else, split it */ + } else { + splitchar = path[split - 1]; + path[split - 1] = L'\0'; + } + /* e.g. "..", "c:" */ + } else { + path_dirpath = path; + split = len; + } + path_filename = &path[split]; + + len = 0; + while (1) { + if (path_filename[len] == L'\0') + break; + if (path_filename[len] == L'*' || path_filename[len] == L'?' || + path_filename[len] == L'>' || path_filename[len] == L'<' || + path_filename[len] == L'"') { + ret_error = ERROR_INVALID_NAME; + goto cleanup; + } + len++; + } + + /* Get directory handle */ + handle = CreateFileW(path_dirpath, + FILE_LIST_DIRECTORY, + FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, + NULL, + OPEN_EXISTING, + FILE_FLAG_BACKUP_SEMANTICS, + NULL); + + if (handle == INVALID_HANDLE_VALUE) { + ret_error = GetLastError(); + goto cleanup; + } + + /* Get files in the directory */ + nt_status = uv__RtlUnicodeStringInit(&FileMask, path_filename, len); + if (!NT_SUCCESS(nt_status)) { + ret_error = pRtlNtStatusToDosError(nt_status); + goto cleanup; + } + nt_status = pNtQueryDirectoryFile(handle, + NULL, + NULL, + NULL, + &io_status, + &dir_info, + sizeof(dir_info), + FileIdFullDirectoryInformation, + TRUE, + &FileMask, + TRUE); + + /* Buffer overflow (a warning status code) is expected here since there isn't + * enough space to store the FileName, and actually indicates success. */ + if (!NT_SUCCESS(nt_status) && nt_status != STATUS_BUFFER_OVERFLOW) { + if (nt_status == STATUS_NO_MORE_FILES) + ret_error = ERROR_PATH_NOT_FOUND; + else + ret_error = pRtlNtStatusToDosError(nt_status); + goto cleanup; + } + + /* Assign values to stat_info */ + memset(&stat_info, 0, sizeof(FILE_STAT_BASIC_INFORMATION)); + stat_info.FileAttributes = dir_info.FileAttributes; + stat_info.CreationTime.QuadPart = dir_info.CreationTime.QuadPart; + stat_info.LastAccessTime.QuadPart = dir_info.LastAccessTime.QuadPart; + stat_info.LastWriteTime.QuadPart = dir_info.LastWriteTime.QuadPart; + if (stat_info.FileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) { + /* A file handle is needed to get st_size for the link (from + * FSCTL_GET_REPARSE_POINT), which is required by posix, but we are here + * because getting the file handle failed. We could get just the + * ReparsePointTag by querying FILE_ID_EXTD_DIR_INFORMATION instead to make + * sure this really is a link before giving up here on the uv_fs_stat call, + * but that doesn't seem essential. */ + if (!do_lstat) + goto cleanup; + stat_info.EndOfFile.QuadPart = 0; + stat_info.AllocationSize.QuadPart = 0; + } else { + stat_info.EndOfFile.QuadPart = dir_info.EndOfFile.QuadPart; + stat_info.AllocationSize.QuadPart = dir_info.AllocationSize.QuadPart; + } + stat_info.ChangeTime.QuadPart = dir_info.ChangeTime.QuadPart; + stat_info.FileId.QuadPart = dir_info.FileId.QuadPart; + + /* Finish up by getting device info from the directory handle, + * since files presumably must live on their device. */ + nt_status = pNtQueryVolumeInformationFile(handle, + &io_status, + &volume_info, + sizeof volume_info, + FileFsVolumeInformation); + + /* Buffer overflow (a warning status code) is expected here. 
*/ + if (io_status.Status == STATUS_NOT_IMPLEMENTED) { + stat_info.VolumeSerialNumber.QuadPart = 0; + } else if (NT_ERROR(nt_status)) { + ret_error = pRtlNtStatusToDosError(nt_status); + goto cleanup; + } else { + stat_info.VolumeSerialNumber.QuadPart = volume_info.VolumeSerialNumber; + } + + nt_status = pNtQueryVolumeInformationFile(handle, + &io_status, + &device_info, + sizeof device_info, + FileFsDeviceInformation); + + /* Buffer overflow (a warning status code) is expected here. */ + if (NT_ERROR(nt_status)) { + ret_error = pRtlNtStatusToDosError(nt_status); + goto cleanup; + } + + stat_info.DeviceType = device_info.DeviceType; + stat_info.NumberOfLinks = 1; /* No way to recover this info. */ + + fs__stat_assign_statbuf(statbuf, stat_info, do_lstat); + ret_error = 0; + +cleanup: + if (split != 0) + path[split - 1] = splitchar; + if (handle != INVALID_HANDLE_VALUE) + CloseHandle(handle); + return ret_error; +} INLINE static DWORD fs__stat_impl_from_path(WCHAR* path, int do_lstat, @@ -1949,7 +2134,7 @@ INLINE static DWORD fs__stat_impl_from_path(WCHAR* path, DWORD flags; DWORD ret; - // If new API exists, try to use it. + /* If new API exists, try to use it. */ switch (fs__stat_path(path, statbuf, do_lstat)) { case FS__STAT_PATH_SUCCESS: return 0; @@ -1959,7 +2144,7 @@ INLINE static DWORD fs__stat_impl_from_path(WCHAR* path, break; } - // If the new API does not exist, use the old API. + /* If the new API does not exist, use the old API. */ flags = FILE_FLAG_BACKUP_SEMANTICS; if (do_lstat) flags |= FILE_FLAG_OPEN_REPARSE_POINT; @@ -1972,8 +2157,12 @@ INLINE static DWORD fs__stat_impl_from_path(WCHAR* path, flags, NULL); - if (handle == INVALID_HANDLE_VALUE) - return GetLastError(); + if (handle == INVALID_HANDLE_VALUE) { + ret = GetLastError(); + if (ret != ERROR_ACCESS_DENIED && ret != ERROR_SHARING_VIOLATION) + return ret; + return fs__stat_directory(path, statbuf, do_lstat, ret); + } if (fs__stat_handle(handle, statbuf, do_lstat) != 0) ret = GetLastError(); diff --git a/deps/uv/src/win/pipe.c b/deps/uv/src/win/pipe.c index d46ecb9fc702e6..d05bfd28aec8b9 100644 --- a/deps/uv/src/win/pipe.c +++ b/deps/uv/src/win/pipe.c @@ -1161,9 +1161,9 @@ int uv__pipe_accept(uv_pipe_t* server, uv_stream_t* client) { err = uv__tcp_xfer_import( (uv_tcp_t*) client, item->xfer_type, &item->xfer_info); - + uv__free(item); - + if (err != 0) return err; @@ -1738,7 +1738,7 @@ static DWORD uv__pipe_get_ipc_remote_pid(uv_pipe_t* handle) { GetNamedPipeServerProcessId(handle->handle, pid); } } - + return *pid; } @@ -2602,6 +2602,9 @@ int uv_pipe_pending_count(uv_pipe_t* handle) { int uv_pipe_getsockname(const uv_pipe_t* handle, char* buffer, size_t* size) { + if (buffer == NULL || size == NULL || *size == 0) + return UV_EINVAL; + if (handle->flags & UV_HANDLE_BOUND) return uv__pipe_getname(handle, buffer, size); @@ -2616,6 +2619,9 @@ int uv_pipe_getsockname(const uv_pipe_t* handle, char* buffer, size_t* size) { int uv_pipe_getpeername(const uv_pipe_t* handle, char* buffer, size_t* size) { + if (buffer == NULL || size == NULL || *size == 0) + return UV_EINVAL; + /* emulate unix behaviour */ if (handle->flags & UV_HANDLE_BOUND) return UV_ENOTCONN; diff --git a/deps/uv/src/win/thread.c b/deps/uv/src/win/thread.c index bf39b88633b0d8..436846a716807e 100644 --- a/deps/uv/src/win/thread.c +++ b/deps/uv/src/win/thread.c @@ -95,6 +95,15 @@ int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) { return uv_thread_create_ex(tid, ¶ms, entry, arg); } + +int uv_thread_detach(uv_thread_t *tid) { + if 
(CloseHandle(*tid) == 0) + return uv_translate_sys_error(GetLastError()); + + return 0; +} + + int uv_thread_create_ex(uv_thread_t* tid, const uv_thread_options_t* params, void (*entry)(void *arg), @@ -269,6 +278,71 @@ int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) { } +int uv_thread_setname(const char* name) { + HRESULT hr; + WCHAR* namew; + int err; + char namebuf[UV_PTHREAD_MAX_NAMELEN_NP]; + + if (name == NULL) + return UV_EINVAL; + + strncpy(namebuf, name, sizeof(namebuf) - 1); + namebuf[sizeof(namebuf) - 1] = '\0'; + + namew = NULL; + err = uv__convert_utf8_to_utf16(namebuf, &namew); + if (err) + return err; + + hr = SetThreadDescription(GetCurrentThread(), namew); + uv__free(namew); + if (FAILED(hr)) + return uv_translate_sys_error(HRESULT_CODE(hr)); + + return 0; +} + + +int uv_thread_getname(uv_thread_t* tid, char* name, size_t size) { + HRESULT hr; + WCHAR* namew; + char* thread_name; + size_t buf_size; + int r; + DWORD exit_code; + + if (name == NULL || size == 0) + return UV_EINVAL; + + if (tid == NULL || *tid == NULL) + return UV_EINVAL; + + /* Check if the thread handle is valid */ + if (!GetExitCodeThread(*tid, &exit_code) || exit_code != STILL_ACTIVE) + return UV_ENOENT; + + namew = NULL; + thread_name = NULL; + hr = GetThreadDescription(*tid, &namew); + if (FAILED(hr)) + return uv_translate_sys_error(HRESULT_CODE(hr)); + + buf_size = size; + r = uv__copy_utf16_to_utf8(namew, -1, name, &buf_size); + if (r == UV_ENOBUFS) { + r = uv__convert_utf16_to_utf8(namew, wcslen(namew), &thread_name); + if (r == 0) { + uv__strscpy(name, thread_name, size); + uv__free(thread_name); + } + } + + LocalFree(namew); + return r; +} + + int uv_mutex_init(uv_mutex_t* mutex) { InitializeCriticalSection(mutex); return 0; diff --git a/deps/uv/src/win/udp.c b/deps/uv/src/win/udp.c index 5c8f6e1dd0b449..e0873c2a899c24 100644 --- a/deps/uv/src/win/udp.c +++ b/deps/uv/src/win/udp.c @@ -1101,7 +1101,8 @@ int uv__udp_try_send(uv_udp_t* handle, struct sockaddr_storage converted; int err; - assert(nbufs > 0); + if (nbufs < 1) + return UV_EINVAL; if (addr != NULL) { err = uv__convert_to_localhost_if_unspecified(addr, &converted); @@ -1141,3 +1142,21 @@ int uv__udp_try_send(uv_udp_t* handle, return bytes; } + + +int uv__udp_try_send2(uv_udp_t* handle, + unsigned int count, + uv_buf_t* bufs[/*count*/], + unsigned int nbufs[/*count*/], + struct sockaddr* addrs[/*count*/]) { + unsigned int i; + int r; + + for (i = 0; i < count; i++) { + r = uv_udp_try_send(handle, bufs[i], nbufs[i], addrs[i]); + if (r < 0) + return i > 0 ? i : r; /* Error if first packet, else send count. 
*/ + } + + return i; +} diff --git a/deps/uv/src/win/util.c b/deps/uv/src/win/util.c index e0dba1aaa94e28..1d1b2837e1a190 100644 --- a/deps/uv/src/win/util.c +++ b/deps/uv/src/win/util.c @@ -191,7 +191,7 @@ int uv_cwd(char* buffer, size_t* size) { WCHAR *utf16_buffer; int r; - if (buffer == NULL || size == NULL) { + if (buffer == NULL || size == NULL || *size == 0) { return UV_EINVAL; } @@ -874,56 +874,100 @@ void uv_free_interface_addresses(uv_interface_address_t* addresses, int uv_getrusage(uv_rusage_t *uv_rusage) { - FILETIME createTime, exitTime, kernelTime, userTime; - SYSTEMTIME kernelSystemTime, userSystemTime; - PROCESS_MEMORY_COUNTERS memCounters; - IO_COUNTERS ioCounters; + FILETIME create_time, exit_time, kernel_time, user_time; + SYSTEMTIME kernel_system_time, user_system_time; + PROCESS_MEMORY_COUNTERS mem_counters; + IO_COUNTERS io_counters; int ret; - ret = GetProcessTimes(GetCurrentProcess(), &createTime, &exitTime, &kernelTime, &userTime); + ret = GetProcessTimes(GetCurrentProcess(), + &create_time, + &exit_time, + &kernel_time, + &user_time); if (ret == 0) { return uv_translate_sys_error(GetLastError()); } - ret = FileTimeToSystemTime(&kernelTime, &kernelSystemTime); + ret = FileTimeToSystemTime(&kernel_time, &kernel_system_time); if (ret == 0) { return uv_translate_sys_error(GetLastError()); } - ret = FileTimeToSystemTime(&userTime, &userSystemTime); + ret = FileTimeToSystemTime(&user_time, &user_system_time); if (ret == 0) { return uv_translate_sys_error(GetLastError()); } ret = GetProcessMemoryInfo(GetCurrentProcess(), - &memCounters, - sizeof(memCounters)); + &mem_counters, + sizeof(mem_counters)); if (ret == 0) { return uv_translate_sys_error(GetLastError()); } - ret = GetProcessIoCounters(GetCurrentProcess(), &ioCounters); + ret = GetProcessIoCounters(GetCurrentProcess(), &io_counters); if (ret == 0) { return uv_translate_sys_error(GetLastError()); } memset(uv_rusage, 0, sizeof(*uv_rusage)); - uv_rusage->ru_utime.tv_sec = userSystemTime.wHour * 3600 + - userSystemTime.wMinute * 60 + - userSystemTime.wSecond; - uv_rusage->ru_utime.tv_usec = userSystemTime.wMilliseconds * 1000; + uv_rusage->ru_utime.tv_sec = user_system_time.wHour * 3600 + + user_system_time.wMinute * 60 + + user_system_time.wSecond; + uv_rusage->ru_utime.tv_usec = user_system_time.wMilliseconds * 1000; - uv_rusage->ru_stime.tv_sec = kernelSystemTime.wHour * 3600 + - kernelSystemTime.wMinute * 60 + - kernelSystemTime.wSecond; - uv_rusage->ru_stime.tv_usec = kernelSystemTime.wMilliseconds * 1000; + uv_rusage->ru_stime.tv_sec = kernel_system_time.wHour * 3600 + + kernel_system_time.wMinute * 60 + + kernel_system_time.wSecond; + uv_rusage->ru_stime.tv_usec = kernel_system_time.wMilliseconds * 1000; - uv_rusage->ru_majflt = (uint64_t) memCounters.PageFaultCount; - uv_rusage->ru_maxrss = (uint64_t) memCounters.PeakWorkingSetSize / 1024; + uv_rusage->ru_majflt = (uint64_t) mem_counters.PageFaultCount; + uv_rusage->ru_maxrss = (uint64_t) mem_counters.PeakWorkingSetSize / 1024; - uv_rusage->ru_oublock = (uint64_t) ioCounters.WriteOperationCount; - uv_rusage->ru_inblock = (uint64_t) ioCounters.ReadOperationCount; + uv_rusage->ru_oublock = (uint64_t) io_counters.WriteOperationCount; + uv_rusage->ru_inblock = (uint64_t) io_counters.ReadOperationCount; + + return 0; +} + + +int uv_getrusage_thread(uv_rusage_t* uv_rusage) { + FILETIME create_time, exit_time, kernel_time, user_time; + SYSTEMTIME kernel_system_time, user_system_time; + int ret; + + ret = GetThreadTimes(GetCurrentThread(), + &create_time, + 
&exit_time, + &kernel_time, + &user_time); + if (ret == 0) { + return uv_translate_sys_error(GetLastError()); + } + + ret = FileTimeToSystemTime(&kernel_time, &kernel_system_time); + if (ret == 0) { + return uv_translate_sys_error(GetLastError()); + } + + ret = FileTimeToSystemTime(&user_time, &user_system_time); + if (ret == 0) { + return uv_translate_sys_error(GetLastError()); + } + + memset(uv_rusage, 0, sizeof(*uv_rusage)); + + uv_rusage->ru_utime.tv_sec = user_system_time.wHour * 3600 + + user_system_time.wMinute * 60 + + user_system_time.wSecond; + uv_rusage->ru_utime.tv_usec = user_system_time.wMilliseconds * 1000; + + uv_rusage->ru_stime.tv_sec = kernel_system_time.wHour * 3600 + + kernel_system_time.wMinute * 60 + + kernel_system_time.wSecond; + uv_rusage->ru_stime.tv_usec = kernel_system_time.wMilliseconds * 1000; return 0; } @@ -1589,7 +1633,7 @@ int uv_os_uname(uv_utsname_t* buffer) { version_size = sizeof(buffer->version) - version_size; r = uv__copy_utf16_to_utf8(os_info.szCSDVersion, -1, - buffer->version + + buffer->version + sizeof(buffer->version) - version_size, &version_size); if (r) diff --git a/deps/uv/src/win/winapi.c b/deps/uv/src/win/winapi.c index a74108db03e701..315a0d49aff50b 100644 --- a/deps/uv/src/win/winapi.c +++ b/deps/uv/src/win/winapi.c @@ -36,9 +36,6 @@ sNtQueryDirectoryFile pNtQueryDirectoryFile; sNtQuerySystemInformation pNtQuerySystemInformation; sNtQueryInformationProcess pNtQueryInformationProcess; -/* Kernel32 function pointers */ -sGetQueuedCompletionStatusEx pGetQueuedCompletionStatusEx; - /* Powrprof.dll function pointer */ sPowerRegisterSuspendResumeNotification pPowerRegisterSuspendResumeNotification; @@ -55,7 +52,6 @@ void uv__winapi_init(void) { HMODULE ntdll_module; HMODULE powrprof_module; HMODULE user32_module; - HMODULE kernel32_module; HMODULE ws2_32_module; HMODULE api_win_core_file_module; @@ -121,15 +117,6 @@ void uv__winapi_init(void) { uv_fatal_error(GetLastError(), "GetProcAddress"); } - kernel32_module = GetModuleHandleA("kernel32.dll"); - if (kernel32_module == NULL) { - uv_fatal_error(GetLastError(), "GetModuleHandleA"); - } - - pGetQueuedCompletionStatusEx = (sGetQueuedCompletionStatusEx) GetProcAddress( - kernel32_module, - "GetQueuedCompletionStatusEx"); - powrprof_module = LoadLibraryExA("powrprof.dll", NULL, LOAD_LIBRARY_SEARCH_SYSTEM32); if (powrprof_module != NULL) { pPowerRegisterSuspendResumeNotification = (sPowerRegisterSuspendResumeNotification) diff --git a/deps/uv/src/win/winapi.h b/deps/uv/src/win/winapi.h index 5800e70dfd7d11..4e0ccc61baf225 100644 --- a/deps/uv/src/win/winapi.h +++ b/deps/uv/src/win/winapi.h @@ -4150,40 +4150,35 @@ typedef struct _FILE_STAT_BASIC_INFORMATION { } FILE_STAT_BASIC_INFORMATION; #endif -/* MinGW already has a definition for REPARSE_DATA_BUFFER, but mingw-w64 does - * not. 
- */ -#if defined(_MSC_VER) || defined(__MINGW64_VERSION_MAJOR) - typedef struct _REPARSE_DATA_BUFFER { - ULONG ReparseTag; - USHORT ReparseDataLength; - USHORT Reserved; - union { - struct { - USHORT SubstituteNameOffset; - USHORT SubstituteNameLength; - USHORT PrintNameOffset; - USHORT PrintNameLength; - ULONG Flags; - WCHAR PathBuffer[1]; - } SymbolicLinkReparseBuffer; - struct { - USHORT SubstituteNameOffset; - USHORT SubstituteNameLength; - USHORT PrintNameOffset; - USHORT PrintNameLength; - WCHAR PathBuffer[1]; - } MountPointReparseBuffer; - struct { - UCHAR DataBuffer[1]; - } GenericReparseBuffer; - struct { - ULONG StringCount; - WCHAR StringList[1]; - } AppExecLinkReparseBuffer; - }; - } REPARSE_DATA_BUFFER, *PREPARSE_DATA_BUFFER; -#endif +typedef struct _REPARSE_DATA_BUFFER { + ULONG ReparseTag; + USHORT ReparseDataLength; + USHORT Reserved; + union { + struct { + USHORT SubstituteNameOffset; + USHORT SubstituteNameLength; + USHORT PrintNameOffset; + USHORT PrintNameLength; + ULONG Flags; + WCHAR PathBuffer[1]; + } SymbolicLinkReparseBuffer; + struct { + USHORT SubstituteNameOffset; + USHORT SubstituteNameLength; + USHORT PrintNameOffset; + USHORT PrintNameLength; + WCHAR PathBuffer[1]; + } MountPointReparseBuffer; + struct { + UCHAR DataBuffer[1]; + } GenericReparseBuffer; + struct { + ULONG StringCount; + WCHAR StringList[1]; + } AppExecLinkReparseBuffer; + }; +} REPARSE_DATA_BUFFER, *PREPARSE_DATA_BUFFER; typedef struct _IO_STATUS_BLOCK { union { @@ -4292,6 +4287,22 @@ typedef struct _FILE_BOTH_DIR_INFORMATION { WCHAR FileName[1]; } FILE_BOTH_DIR_INFORMATION, *PFILE_BOTH_DIR_INFORMATION; +typedef struct _FILE_ID_FULL_DIR_INFORMATION { + ULONG NextEntryOffset; + ULONG FileIndex; + LARGE_INTEGER CreationTime; + LARGE_INTEGER LastAccessTime; + LARGE_INTEGER LastWriteTime; + LARGE_INTEGER ChangeTime; + LARGE_INTEGER EndOfFile; + LARGE_INTEGER AllocationSize; + ULONG FileAttributes; + ULONG FileNameLength; + ULONG EaSize; + LARGE_INTEGER FileId; + WCHAR FileName[1]; +} FILE_ID_FULL_DIR_INFORMATION, *PFILE_ID_FULL_DIR_INFORMATION; + typedef struct _FILE_BASIC_INFORMATION { LARGE_INTEGER CreationTime; LARGE_INTEGER LastAccessTime; @@ -4661,15 +4672,6 @@ typedef NTSTATUS (NTAPI *sNtQueryInformationProcess) # define SYMBOLIC_LINK_FLAG_DIRECTORY 0x1 #endif -#if defined(__MINGW32__) && !defined(__MINGW64_VERSION_MAJOR) - typedef struct _OVERLAPPED_ENTRY { - ULONG_PTR lpCompletionKey; - LPOVERLAPPED lpOverlapped; - ULONG_PTR Internal; - DWORD dwNumberOfBytesTransferred; - } OVERLAPPED_ENTRY, *LPOVERLAPPED_ENTRY; -#endif - /* from wincon.h */ #ifndef ENABLE_INSERT_MODE # define ENABLE_INSERT_MODE 0x20 @@ -4716,14 +4718,6 @@ typedef NTSTATUS (NTAPI *sNtQueryInformationProcess) # define ERROR_MUI_FILE_NOT_LOADED 15105 #endif -typedef BOOL (WINAPI *sGetQueuedCompletionStatusEx) - (HANDLE CompletionPort, - LPOVERLAPPED_ENTRY lpCompletionPortEntries, - ULONG ulCount, - PULONG ulNumEntriesRemoved, - DWORD dwMilliseconds, - BOOL fAlertable); - /* from powerbase.h */ #ifndef DEVICE_NOTIFY_CALLBACK # define DEVICE_NOTIFY_CALLBACK 2 @@ -4818,9 +4812,6 @@ extern sNtQueryDirectoryFile pNtQueryDirectoryFile; extern sNtQuerySystemInformation pNtQuerySystemInformation; extern sNtQueryInformationProcess pNtQueryInformationProcess; -/* Kernel32 function pointers */ -extern sGetQueuedCompletionStatusEx pGetQueuedCompletionStatusEx; - /* Powrprof.dll function pointer */ extern sPowerRegisterSuspendResumeNotification pPowerRegisterSuspendResumeNotification; @@ -4837,4 +4828,13 @@ typedef int (WINAPI 
*uv_sGetHostNameW) int); extern uv_sGetHostNameW pGetHostNameW; +/* processthreadsapi.h */ +#if defined(__MINGW32__) +WINBASEAPI +HRESULT WINAPI GetThreadDescription(HANDLE hThread, + PWSTR *ppszThreadDescription); +WINBASEAPI +HRESULT WINAPI SetThreadDescription(HANDLE hThread, PCWSTR lpThreadDescription); +#endif + #endif /* UV_WIN_WINAPI_H_ */ diff --git a/deps/uv/src/win/winsock.h b/deps/uv/src/win/winsock.h index 2af958870a7de6..bb3808a35c27e6 100644 --- a/deps/uv/src/win/winsock.h +++ b/deps/uv/src/win/winsock.h @@ -154,47 +154,6 @@ typedef struct _AFD_RECV_INFO { #define IOCTL_AFD_POLL \ _AFD_CONTROL_CODE(AFD_POLL, METHOD_BUFFERED) -#if defined(__MINGW32__) && !defined(__MINGW64_VERSION_MAJOR) -typedef struct _IP_ADAPTER_UNICAST_ADDRESS_XP { - /* FIXME: __C89_NAMELESS was removed */ - /* __C89_NAMELESS */ union { - ULONGLONG Alignment; - /* __C89_NAMELESS */ struct { - ULONG Length; - DWORD Flags; - }; - }; - struct _IP_ADAPTER_UNICAST_ADDRESS_XP *Next; - SOCKET_ADDRESS Address; - IP_PREFIX_ORIGIN PrefixOrigin; - IP_SUFFIX_ORIGIN SuffixOrigin; - IP_DAD_STATE DadState; - ULONG ValidLifetime; - ULONG PreferredLifetime; - ULONG LeaseLifetime; -} IP_ADAPTER_UNICAST_ADDRESS_XP,*PIP_ADAPTER_UNICAST_ADDRESS_XP; - -typedef struct _IP_ADAPTER_UNICAST_ADDRESS_LH { - union { - ULONGLONG Alignment; - struct { - ULONG Length; - DWORD Flags; - }; - }; - struct _IP_ADAPTER_UNICAST_ADDRESS_LH *Next; - SOCKET_ADDRESS Address; - IP_PREFIX_ORIGIN PrefixOrigin; - IP_SUFFIX_ORIGIN SuffixOrigin; - IP_DAD_STATE DadState; - ULONG ValidLifetime; - ULONG PreferredLifetime; - ULONG LeaseLifetime; - UINT8 OnLinkPrefixLength; -} IP_ADAPTER_UNICAST_ADDRESS_LH,*PIP_ADAPTER_UNICAST_ADDRESS_LH; - -#endif - int uv__convert_to_localhost_if_unspecified(const struct sockaddr* addr, struct sockaddr_storage* storage); diff --git a/deps/uv/test/runner.c b/deps/uv/test/runner.c index d1dd02f5ce0806..54abb39dd22886 100644 --- a/deps/uv/test/runner.c +++ b/deps/uv/test/runner.c @@ -27,6 +27,11 @@ #include "task.h" #include "uv.h" +/* Refs: https://github.com/libuv/libuv/issues/4369 */ +#if defined(__ANDROID__) +#include +#endif + char executable_path[sizeof(executable_path)]; @@ -142,6 +147,13 @@ void log_tap_result(int test_count, fflush(stdout); } +void enable_fdsan(void) { +/* Refs: https://github.com/libuv/libuv/issues/4369 */ +#if defined(__ANDROID__) + android_fdsan_set_error_level(ANDROID_FDSAN_ERROR_LEVEL_WARN_ALWAYS); +#endif +} + int run_test(const char* test, int benchmark_output, @@ -160,6 +172,8 @@ int run_test(const char* test, main_proc = NULL; process_count = 0; + enable_fdsan(); + #ifndef _WIN32 /* Clean up stale socket from previous run. */ remove(TEST_PIPENAME); diff --git a/deps/uv/test/test-fs-event.c b/deps/uv/test/test-fs-event.c index bb223a5f654c03..b53057dc25bb22 100644 --- a/deps/uv/test/test-fs-event.c +++ b/deps/uv/test/test-fs-event.c @@ -153,7 +153,14 @@ static void fs_event_cb_del_dir(uv_fs_event_t* handle, ASSERT_PTR_EQ(handle, &fs_event); ASSERT_OK(status); ASSERT(events == UV_CHANGE || events == UV_RENAME); + /* There is a bug in the FreeBSD kernel where the filename is sometimes NULL. 
+ * Refs: https://github.com/libuv/libuv/issues/4606 + */ + #if defined(__FreeBSD__) + ASSERT(filename == NULL || strcmp(filename, "watch_del_dir") == 0); + #else ASSERT_OK(strcmp(filename, "watch_del_dir")); + #endif ASSERT_OK(uv_fs_event_stop(handle)); uv_close((uv_handle_t*)handle, close_cb); } @@ -1121,7 +1128,7 @@ TEST_IMPL(fs_event_getpath) { ASSERT_EQ(r, UV_EINVAL); r = uv_fs_event_start(&fs_event, fail_cb, watch_dir[i], 0); ASSERT_OK(r); - len = 0; + len = 1; r = uv_fs_event_getpath(&fs_event, buf, &len); ASSERT_EQ(r, UV_ENOBUFS); ASSERT_LT(len, sizeof buf); /* sanity check */ diff --git a/deps/uv/test/test-fs.c b/deps/uv/test/test-fs.c index 33cbd428707c36..423d72dd2f7b84 100644 --- a/deps/uv/test/test-fs.c +++ b/deps/uv/test/test-fs.c @@ -4507,6 +4507,60 @@ TEST_IMPL(fs_open_readonly_acl) { MAKE_VALGRIND_HAPPY(loop); return 0; } + +TEST_IMPL(fs_stat_no_permission) { + uv_passwd_t pwd; + uv_fs_t req; + int r; + char* filename = "test_file_no_permission.txt"; + + /* Setup - clear the ACL and remove the file */ + loop = uv_default_loop(); + r = uv_os_get_passwd(&pwd); + ASSERT_OK(r); + call_icacls("icacls %s /remove *S-1-1-0:(F)", filename); + unlink(filename); + + /* Create the file */ + r = uv_fs_open(loop, + &open_req1, + filename, + UV_FS_O_RDONLY | UV_FS_O_CREAT, + S_IRUSR, + NULL); + ASSERT_GE(r, 0); + ASSERT_GE(open_req1.result, 0); + uv_fs_req_cleanup(&open_req1); + r = uv_fs_close(NULL, &close_req, open_req1.result, NULL); + ASSERT_OK(r); + ASSERT_OK(close_req.result); + uv_fs_req_cleanup(&close_req); + + /* Set up ACL */ + r = call_icacls("icacls %s /deny *S-1-1-0:(F)", filename); + if (r != 0) { + goto acl_cleanup; + } + + /* Read file stats */ + r = uv_fs_stat(NULL, &req, filename, NULL); + if (r != 0) { + goto acl_cleanup; + } + + uv_fs_req_cleanup(&req); + + acl_cleanup: + /* Cleanup */ + call_icacls("icacls %s /reset", filename); + uv_fs_unlink(NULL, &unlink_req, filename, NULL); + uv_fs_req_cleanup(&unlink_req); + unlink(filename); + uv_os_free_passwd(&pwd); + ASSERT_OK(r); + MAKE_VALGRIND_HAPPY(loop); + return 0; +} #endif #ifdef _WIN32 diff --git a/deps/uv/test/test-idna.c b/deps/uv/test/test-idna.c index 28f9eaaae9e77a..46df9f3c581015 100644 --- a/deps/uv/test/test-idna.c +++ b/deps/uv/test/test-idna.c @@ -39,7 +39,7 @@ TEST_IMPL(utf8_decode1) { /* Two-byte sequences. */ p = b; - snprintf(b, sizeof(b), "\xC2\x80\xDF\xBF"); + snprintf(b, sizeof(b), "%s", "\xC2\x80\xDF\xBF"); ASSERT_EQ(128, uv__utf8_decode1(&p, b + sizeof(b))); ASSERT_PTR_EQ(p, b + 2); ASSERT_EQ(0x7FF, uv__utf8_decode1(&p, b + sizeof(b))); @@ -47,7 +47,7 @@ TEST_IMPL(utf8_decode1) { /* Three-byte sequences. */ p = b; - snprintf(b, sizeof(b), "\xE0\xA0\x80\xEF\xBF\xBF"); + snprintf(b, sizeof(b), "%s", "\xE0\xA0\x80\xEF\xBF\xBF"); ASSERT_EQ(0x800, uv__utf8_decode1(&p, b + sizeof(b))); ASSERT_PTR_EQ(p, b + 3); ASSERT_EQ(0xFFFF, uv__utf8_decode1(&p, b + sizeof(b))); @@ -55,7 +55,7 @@ TEST_IMPL(utf8_decode1) { /* Four-byte sequences. */ p = b; - snprintf(b, sizeof(b), "\xF0\x90\x80\x80\xF4\x8F\xBF\xBF"); + snprintf(b, sizeof(b), "%s", "\xF0\x90\x80\x80\xF4\x8F\xBF\xBF"); ASSERT_EQ(0x10000, uv__utf8_decode1(&p, b + sizeof(b))); ASSERT_PTR_EQ(p, b + 4); ASSERT_EQ(0x10FFFF, uv__utf8_decode1(&p, b + sizeof(b))); @@ -63,7 +63,7 @@ TEST_IMPL(utf8_decode1) { /* Four-byte sequences > U+10FFFF; disallowed. 
*/ p = b; - snprintf(b, sizeof(b), "\xF4\x90\xC0\xC0\xF7\xBF\xBF\xBF"); + snprintf(b, sizeof(b), "%s", "\xF4\x90\xC0\xC0\xF7\xBF\xBF\xBF"); ASSERT_EQ((unsigned) -1, uv__utf8_decode1(&p, b + sizeof(b))); ASSERT_PTR_EQ(p, b + 4); ASSERT_EQ((unsigned) -1, uv__utf8_decode1(&p, b + sizeof(b))); @@ -71,7 +71,7 @@ TEST_IMPL(utf8_decode1) { /* Overlong; disallowed. */ p = b; - snprintf(b, sizeof(b), "\xC0\x80\xC1\x80"); + snprintf(b, sizeof(b), "%s", "\xC0\x80\xC1\x80"); ASSERT_EQ((unsigned) -1, uv__utf8_decode1(&p, b + sizeof(b))); ASSERT_PTR_EQ(p, b + 2); ASSERT_EQ((unsigned) -1, uv__utf8_decode1(&p, b + sizeof(b))); @@ -79,7 +79,7 @@ TEST_IMPL(utf8_decode1) { /* Surrogate pairs; disallowed. */ p = b; - snprintf(b, sizeof(b), "\xED\xA0\x80\xED\xA3\xBF"); + snprintf(b, sizeof(b), "%s", "\xED\xA0\x80\xED\xA3\xBF"); ASSERT_EQ((unsigned) -1, uv__utf8_decode1(&p, b + sizeof(b))); ASSERT_PTR_EQ(p, b + 3); ASSERT_EQ((unsigned) -1, uv__utf8_decode1(&p, b + sizeof(b))); @@ -87,7 +87,7 @@ TEST_IMPL(utf8_decode1) { /* Simply illegal. */ p = b; - snprintf(b, sizeof(b), "\xF8\xF9\xFA\xFB\xFC\xFD\xFE\xFF"); + snprintf(b, sizeof(b), "%s", "\xF8\xF9\xFA\xFB\xFC\xFD\xFE\xFF"); for (i = 1; i <= 8; i++) { ASSERT_EQ((unsigned) -1, uv__utf8_decode1(&p, b + sizeof(b))); @@ -218,3 +218,15 @@ TEST_IMPL(idna_toascii) { #undef T #endif /* __MVS__ */ + +TEST_IMPL(wtf8) { + static const char input[] = "ᜄȺy𐞲:𞢢𘴇𐀀'¥3̞[ + +struct semaphores { + uv_sem_t main; + uv_sem_t worker; +}; + +static void thread_run(void* arg) { + int r; + char thread_name[16]; + struct semaphores* sem; + uv_thread_t thread; + + sem = arg; + +#ifdef _WIN32 + /* uv_thread_self isn't defined for the main thread on Windows. */ + thread = GetCurrentThread(); +#else + thread = uv_thread_self(); +#endif + + r = uv_thread_setname("worker-thread"); + ASSERT_OK(r); + + uv_sem_post(&sem->worker); + + r = uv_thread_getname(&thread, thread_name, sizeof(thread_name)); + ASSERT_OK(r); + + ASSERT_STR_EQ(thread_name, "worker-thread"); + + uv_sem_wait(&sem->main); +} + +TEST_IMPL(thread_name) { + int r; + uv_thread_t threads[2]; + char tn[UV_PTHREAD_MAX_NAMELEN_NP]; + char thread_name[UV_PTHREAD_MAX_NAMELEN_NP]; + char long_thread_name[UV_PTHREAD_MAX_NAMELEN_NP + 1]; + struct semaphores sem; + +#if defined(__ANDROID_API__) && __ANDROID_API__ < 26 || \ + defined(_AIX) || \ + defined(__MVS__) || \ + defined(__PASE__) + RETURN_SKIP("API not available on this platform"); +#endif + + ASSERT_OK(uv_sem_init(&sem.main, 0)); + ASSERT_OK(uv_sem_init(&sem.worker, 0)); + + memset(thread_name, 'a', sizeof(thread_name) - 1); + thread_name[sizeof(thread_name) - 1] = '\0'; + + memset(long_thread_name, 'a', sizeof(long_thread_name) - 1); + long_thread_name[sizeof(long_thread_name) - 1] = '\0'; + +#ifdef _WIN32 + /* uv_thread_self isn't defined for the main thread on Windows. */ + threads[0] = GetCurrentThread(); +#else + threads[0] = uv_thread_self(); +#endif + + r = uv_thread_getname(&threads[0], tn, sizeof(tn)); + ASSERT_OK(r); + + r = uv_thread_setname(long_thread_name); + ASSERT_OK(r); + + r = uv_thread_getname(&threads[0], tn, sizeof(tn)); + ASSERT_OK(r); + ASSERT_STR_EQ(tn, thread_name); + + r = uv_thread_setname(thread_name); + ASSERT_OK(r); + + r = uv_thread_getname(&threads[0], tn, sizeof(tn)); + ASSERT_OK(r); + ASSERT_STR_EQ(tn, thread_name); + + r = uv_thread_getname(&threads[0], tn, 3); + ASSERT_OK(r); + ASSERT_EQ(strlen(tn), 2); + ASSERT_OK(memcmp(thread_name, tn, 2)); + + /* Illumos doesn't support non-ASCII thread names. 
*/ +#ifndef __illumos__ + r = uv_thread_setname("~½¬{½"); + ASSERT_OK(r); + + r = uv_thread_getname(&threads[0], tn, sizeof(tn)); + ASSERT_OK(r); + ASSERT_STR_EQ(tn, "~½¬{½"); +#endif + + ASSERT_OK(uv_thread_create(threads + 1, thread_run, &sem)); + + uv_sem_wait(&sem.worker); + + r = uv_thread_getname(threads + 1, tn, sizeof(tn)); + ASSERT_OK(r); + + ASSERT_STR_EQ(tn, "worker-thread"); + + uv_sem_post(&sem.main); + + ASSERT_OK(uv_thread_join(threads + 1)); + + uv_sem_destroy(&sem.main); + uv_sem_destroy(&sem.worker); + + return 0; +} + +#define MAX_THREADS 4 + +static void* executedThreads[MAX_THREADS] = { NULL }; +static int size; +static uv_loop_t* loop; + +static unsigned short int key_exists(void* key) { + size_t i; + for (i = 0; i < MAX_THREADS; i++) { + if (executedThreads[i] == key) { + return 1; + } + } + return 0; +} + +static void work_cb(uv_work_t* req) { + uv_thread_t thread = uv_thread_self(); + req->data = &thread; + char tn[UV_PTHREAD_MAX_NAMELEN_NP]; + ASSERT_OK(uv_thread_getname(&thread, tn, sizeof(tn))); + ASSERT_STR_EQ(tn, "libuv-worker"); +} + +static void after_work_cb(uv_work_t* req, int status) { + ASSERT_OK(status); + if (!key_exists(req->data)) { + executedThreads[size++] = req->data; + } + + if (size == MAX_THREADS) { + return; + } + + uv_queue_work(loop, req, work_cb, after_work_cb); +} + +TEST_IMPL(thread_name_threadpool) { + uv_work_t req; + loop = uv_default_loop(); + // Just to make sure all workers will be executed + // with the correct thread name + ASSERT_OK(uv_queue_work(loop, &req, work_cb, after_work_cb)); + uv_run(loop, UV_RUN_DEFAULT); + MAKE_VALGRIND_HAPPY(uv_default_loop()); + return 0; +} diff --git a/deps/uv/test/test-thread.c b/deps/uv/test/test-thread.c index d0094e304435bb..819bbd5c92399d 100644 --- a/deps/uv/test/test-thread.c +++ b/deps/uv/test/test-thread.c @@ -294,3 +294,13 @@ TEST_IMPL(thread_stack_size_explicit) { return 0; } + +static void thread_detach_cb(void* arg) {} + +TEST_IMPL(thread_detach) { + uv_thread_t thread; + ASSERT_OK(uv_thread_create(&thread, thread_detach_cb, NULL)); + ASSERT_OK(uv_thread_detach(&thread)); + + return 0; +} diff --git a/deps/uv/test/test-udp-mmsg.c b/deps/uv/test/test-udp-mmsg.c index c0e000b9d92bbf..73213c43d97aa2 100644 --- a/deps/uv/test/test-udp-mmsg.c +++ b/deps/uv/test/test-udp-mmsg.c @@ -32,12 +32,12 @@ #define BUFFER_MULTIPLIER 20 #define MAX_DGRAM_SIZE (64 * 1024) #define NUM_SENDS 40 -#define EXPECTED_MMSG_ALLOCS (NUM_SENDS / BUFFER_MULTIPLIER) static uv_udp_t recver; static uv_udp_t sender; static int recv_cb_called; static int received_datagrams; +static int read_bytes; static int close_cb_called; static int alloc_cb_called; @@ -74,6 +74,7 @@ static void recv_cb(uv_udp_t* handle, const struct sockaddr* addr, unsigned flags) { ASSERT_GE(nread, 0); + read_bytes += nread; /* free and return if this is a mmsg free-only callback invocation */ if (flags & UV_UDP_MMSG_FREE) { @@ -140,7 +141,7 @@ TEST_IMPL(udp_mmsg) { /* On platforms that don't support mmsg, each recv gets its own alloc */ if (uv_udp_using_recvmmsg(&recver)) - ASSERT_EQ(alloc_cb_called, EXPECTED_MMSG_ALLOCS); + ASSERT_EQ(read_bytes, NUM_SENDS * 4); /* we're sending 4 bytes per datagram */ else ASSERT_EQ(alloc_cb_called, recv_cb_called); diff --git a/deps/uv/test/test-udp-multicast-join.c b/deps/uv/test/test-udp-multicast-join.c index 9e322dc579fc33..58b055561c6ded 100644 --- a/deps/uv/test/test-udp-multicast-join.c +++ b/deps/uv/test/test-udp-multicast-join.c @@ -36,10 +36,9 @@ static uv_udp_t client; static uv_udp_send_t req; static 
uv_udp_send_t req_ss; +static int darwin_ebusy_errors; static int cl_recv_cb_called; - static int sv_send_cb_called; - static int close_cb_called; static void alloc_cb(uv_handle_t* handle, @@ -128,6 +127,13 @@ static void cl_recv_cb(uv_udp_t* handle, #if !defined(__NetBSD__) r = uv_udp_set_source_membership(&server, MULTICAST_ADDR, NULL, source_addr, UV_JOIN_GROUP); +#if defined(__APPLE__) + if (r == UV_EBUSY) { + uv_close((uv_handle_t*) &server, close_cb); + darwin_ebusy_errors++; + return; + } +#endif ASSERT_OK(r); #endif @@ -160,7 +166,13 @@ TEST_IMPL(udp_multicast_join) { r = uv_udp_set_membership(&server, MULTICAST_ADDR, NULL, UV_JOIN_GROUP); if (r == UV_ENODEV) RETURN_SKIP("No multicast support."); + if (r == UV_ENOEXEC) + RETURN_SKIP("No multicast support (likely a firewall issue)."); ASSERT_OK(r); +#if defined(__ANDROID__) + /* It returns an ENOSYS error */ + RETURN_SKIP("Test does not currently work in ANDROID"); +#endif r = uv_udp_recv_start(&server, alloc_cb, cl_recv_cb); ASSERT_OK(r); @@ -175,6 +187,9 @@ TEST_IMPL(udp_multicast_join) { /* run the loop till all events are processed */ uv_run(uv_default_loop(), UV_RUN_DEFAULT); + if (darwin_ebusy_errors > 0) + RETURN_SKIP("Unexplained macOS IP_ADD_SOURCE_MEMBERSHIP EBUSY bug"); + ASSERT_EQ(2, cl_recv_cb_called); ASSERT_EQ(2, sv_send_cb_called); ASSERT_EQ(2, close_cb_called); diff --git a/deps/uv/test/test-udp-multicast-join6.c b/deps/uv/test/test-udp-multicast-join6.c index c6872e4283247d..430e4e3321e859 100644 --- a/deps/uv/test/test-udp-multicast-join6.c +++ b/deps/uv/test/test-udp-multicast-join6.c @@ -33,6 +33,7 @@ #if defined(__APPLE__) || \ defined(_AIX) || \ defined(__MVS__) || \ + defined(__FreeBSD__) || \ defined(__NetBSD__) || \ defined(__OpenBSD__) #define MULTICAST_ADDR "ff02::1%lo0" diff --git a/deps/uv/test/test-udp-try-send.c b/deps/uv/test/test-udp-try-send.c index 0c76fb1c84df68..6181fbbbffca3b 100644 --- a/deps/uv/test/test-udp-try-send.c +++ b/deps/uv/test/test-udp-try-send.c @@ -60,8 +60,6 @@ static void sv_recv_cb(uv_udp_t* handle, const uv_buf_t* rcvbuf, const struct sockaddr* addr, unsigned flags) { - ASSERT_GT(nread, 0); - if (nread == 0) { ASSERT_NULL(addr); return; @@ -70,11 +68,17 @@ static void sv_recv_cb(uv_udp_t* handle, ASSERT_EQ(4, nread); ASSERT_NOT_NULL(addr); - ASSERT_OK(memcmp("EXIT", rcvbuf->base, nread)); - uv_close((uv_handle_t*) handle, close_cb); - uv_close((uv_handle_t*) &client, close_cb); + if (!memcmp("EXIT", rcvbuf->base, nread)) { + uv_close((uv_handle_t*) handle, close_cb); + uv_close((uv_handle_t*) &client, close_cb); + } else { + ASSERT_MEM_EQ(rcvbuf->base, "HELO", 4); + } sv_recv_cb_called++; + + if (sv_recv_cb_called == 2) + uv_udp_recv_stop(handle); } @@ -101,9 +105,33 @@ TEST_IMPL(udp_try_send) { ASSERT_OK(r); buf = uv_buf_init(buffer, sizeof(buffer)); + + r = uv_udp_try_send(&client, &buf, 0, (const struct sockaddr*) &addr); + ASSERT_EQ(r, UV_EINVAL); + r = uv_udp_try_send(&client, &buf, 1, (const struct sockaddr*) &addr); ASSERT_EQ(r, UV_EMSGSIZE); + uv_buf_t* bufs[] = {&buf, &buf}; + unsigned int nbufs[] = {1, 1}; + struct sockaddr* addrs[] = { + (struct sockaddr*) &addr, + (struct sockaddr*) &addr, + }; + + ASSERT_EQ(0, sv_recv_cb_called); + + buf = uv_buf_init("HELO", 4); + r = uv_udp_try_send2(&client, 2, bufs, nbufs, addrs, /*flags*/0); + ASSERT_EQ(r, 2); + + uv_run(uv_default_loop(), UV_RUN_DEFAULT); + + ASSERT_EQ(2, sv_recv_cb_called); + + r = uv_udp_recv_start(&server, alloc_cb, sv_recv_cb); + ASSERT_OK(r); + buf = uv_buf_init("EXIT", 4); r = uv_udp_try_send(&client, 
&buf, 1, (const struct sockaddr*) &addr); ASSERT_EQ(4, r); @@ -111,7 +139,7 @@ TEST_IMPL(udp_try_send) { uv_run(uv_default_loop(), UV_RUN_DEFAULT); ASSERT_EQ(2, close_cb_called); - ASSERT_EQ(1, sv_recv_cb_called); + ASSERT_EQ(3, sv_recv_cb_called); ASSERT_OK(client.send_queue_size); ASSERT_OK(server.send_queue_size); From 56b21489f457abec23808f85f158046e26251bc8 Mon Sep 17 00:00:00 2001 From: Rafael Gonzaga Date: Fri, 17 Jan 2025 18:50:56 -0300 Subject: [PATCH 099/158] doc: mention prepare --security PR-URL: https://github.com/nodejs/node/pull/56617 Reviewed-By: Marco Ippolito Reviewed-By: Luigi Pinca --- doc/contributing/releases.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/doc/contributing/releases.md b/doc/contributing/releases.md index 40ba96da602033..5b6d2180515565 100644 --- a/doc/contributing/releases.md +++ b/doc/contributing/releases.md @@ -308,6 +308,22 @@ branch. git checkout -b v1.2.3-proposal upstream/v1.x-staging ``` +You can also run: + +```bash +git node release -S --prepare --security --filterLabel vX.x +``` + +Example: + +```bash +git checkout v20.x +git node release -S --prepare --security --filterLabel v20.x +``` + +to automate the remaining steps until step 6 or you can perform it manually +following the below steps. +
Security release From 21362cc4f46469c672f02ddf588ca9e608f19953 Mon Sep 17 00:00:00 2001 From: Colin Ihrig Date: Sat, 18 Jan 2025 13:01:54 -0500 Subject: [PATCH 100/158] punycode: limit deprecation warning DEP0040 is an extremely annoying warning. Most of the people seeing it cannot do anything about it. This commit updates the warning logic to only emit outside of node_modules. This is similar to other warnings such as the Buffer() constructor warning. Ideally, this should be backported to Node 22. Refs: https://github.com/nodejs/node/pull/47202 PR-URL: https://github.com/nodejs/node/pull/56632 Reviewed-By: Jordan Harband Reviewed-By: Richard Lau Reviewed-By: Yagiz Nizipli Reviewed-By: Luigi Pinca Reviewed-By: Matteo Collina Reviewed-By: Antoine du Hamel Reviewed-By: Joyee Cheung --- lib/punycode.js | 19 ++++++++++++------- .../errors/core_line_numbers.snapshot | 6 +++--- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/lib/punycode.js b/lib/punycode.js index 7dfe552a5c9efa..e303a5373b8839 100644 --- a/lib/punycode.js +++ b/lib/punycode.js @@ -1,11 +1,16 @@ 'use strict'; - -process.emitWarning( - 'The `punycode` module is deprecated. Please use a userland ' + - 'alternative instead.', - 'DeprecationWarning', - 'DEP0040', -); +const { + isInsideNodeModules, +} = internalBinding('util'); + +if (!isInsideNodeModules(100, true)) { + process.emitWarning( + 'The `punycode` module is deprecated. Please use a userland ' + + 'alternative instead.', + 'DeprecationWarning', + 'DEP0040', + ); +} /** Highest positive signed 32-bit float value */ const maxInt = 2147483647; // aka. 0x7FFFFFFF or 2^31-1 diff --git a/test/fixtures/errors/core_line_numbers.snapshot b/test/fixtures/errors/core_line_numbers.snapshot index 54cdb52744b29e..9ef06c33af8e28 100644 --- a/test/fixtures/errors/core_line_numbers.snapshot +++ b/test/fixtures/errors/core_line_numbers.snapshot @@ -1,10 +1,10 @@ -node:punycode:49 +node:punycode:54 throw new RangeError(errors[type]); ^ RangeError: Invalid input - at error (node:punycode:49:8) - at Object.decode (node:punycode:242:5) + at error (node:punycode:54:8) + at Object.decode (node:punycode:247:5) at Object. 
(*core_line_numbers.js:13:10) Node.js * From 8bbdb1203ec158b28b2070ab4c2f3c37f8a3f9d0 Mon Sep 17 00:00:00 2001 From: Maksim Gorkov <33923276+MGorkov@users.noreply.github.com> Date: Sat, 18 Jan 2025 22:39:56 +0300 Subject: [PATCH 101/158] child_process: fix parsing messages with splitted length field MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes: https://github.com/nodejs/node/issues/55834 PR-URL: https://github.com/nodejs/node/pull/56106 Reviewed-By: Luigi Pinca Reviewed-By: Juan José Arboleda Reviewed-By: James M Snell --- lib/internal/child_process/serialization.js | 7 +++++- ...ced-serialization-splitted-length-field.js | 24 +++++++++++++++++++ 2 files changed, 30 insertions(+), 1 deletion(-) create mode 100644 test/parallel/test-child-process-advanced-serialization-splitted-length-field.js diff --git a/lib/internal/child_process/serialization.js b/lib/internal/child_process/serialization.js index 7be39f0d48c3c2..46bb1faaf9fc21 100644 --- a/lib/internal/child_process/serialization.js +++ b/lib/internal/child_process/serialization.js @@ -61,7 +61,12 @@ const advanced = { *parseChannelMessages(channel, readData) { if (readData.length === 0) return; - ArrayPrototypePush(channel[kMessageBuffer], readData); + if (channel[kMessageBufferSize] && channel[kMessageBuffer][0].length < 4) { + // Message length split into two buffers, so let's concatenate it. + channel[kMessageBuffer][0] = Buffer.concat([channel[kMessageBuffer][0], readData]); + } else { + ArrayPrototypePush(channel[kMessageBuffer], readData); + } channel[kMessageBufferSize] += readData.length; // Index 0 should always be present because we just pushed data into it. diff --git a/test/parallel/test-child-process-advanced-serialization-splitted-length-field.js b/test/parallel/test-child-process-advanced-serialization-splitted-length-field.js new file mode 100644 index 00000000000000..5407a56f495c8f --- /dev/null +++ b/test/parallel/test-child-process-advanced-serialization-splitted-length-field.js @@ -0,0 +1,24 @@ +'use strict'; +const common = require('../common'); +const child_process = require('child_process'); + +// Regression test for https://github.com/nodejs/node/issues/55834 +const msgLen = 65521; +let cnt = 10; + +if (process.argv[2] === 'child') { + const msg = Buffer.allocUnsafe(msgLen); + (function send() { + if (cnt--) { + process.send(msg, send); + } else { + process.disconnect(); + } + })(); +} else { + const child = child_process.spawn(process.execPath, [__filename, 'child'], { + stdio: ['inherit', 'inherit', 'inherit', 'ipc'], + serialization: 'advanced' + }); + child.on('message', common.mustCall(cnt)); +} From a5f90232978da7baa7afcf7950e6c094aa58524b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C3=ABl=20Zasso?= Date: Sun, 19 Jan 2025 17:47:50 +0100 Subject: [PATCH 102/158] src: initialize FSReqWrapSync in path that uses it PR-URL: https://github.com/nodejs/node/pull/56613 Reviewed-By: James M Snell Reviewed-By: Yagiz Nizipli Reviewed-By: Luigi Pinca --- src/node_file.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/node_file.cc b/src/node_file.cc index 1b56d2323c9526..984bc55ee9b941 100644 --- a/src/node_file.cc +++ b/src/node_file.cc @@ -2482,7 +2482,6 @@ static void WriteString(const FunctionCallbackInfo& args) { } } else { // write(fd, string, pos, enc, undefined, ctx) CHECK_EQ(argc, 6); - FSReqWrapSync req_wrap_sync; FSReqBase::FSReqBuffer stack_buffer; if (buf == 
nullptr) { if (!StringBytes::StorageSize(isolate, value, enc).To(&len)) @@ -2496,6 +2495,7 @@ static void WriteString(const FunctionCallbackInfo& args) { buf = *stack_buffer; } uv_buf_t uvbuf = uv_buf_init(buf, len); + FSReqWrapSync req_wrap_sync("write"); FS_SYNC_TRACE_BEGIN(write); int bytesWritten = SyncCall(env, args[5], &req_wrap_sync, "write", uv_fs_write, fd, &uvbuf, 1, pos); From edd936149944a4c9f1dc97dd840350f02af32f08 Mon Sep 17 00:00:00 2001 From: James M Snell Date: Sun, 19 Jan 2025 08:55:54 -0800 Subject: [PATCH 103/158] deps: fixup some minor coverity warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes: https://github.com/nodejs/node/issues/56611 PR-URL: https://github.com/nodejs/node/pull/56612 Reviewed-By: Michaël Zasso Reviewed-By: Benjamin Gruenbaum Reviewed-By: Yagiz Nizipli Reviewed-By: Michael Dawson Reviewed-By: Ulises Gascón --- deps/ncrypto/ncrypto.cc | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/deps/ncrypto/ncrypto.cc b/deps/ncrypto/ncrypto.cc index fa0cf58062d897..ce2e7b384eb198 100644 --- a/deps/ncrypto/ncrypto.cc +++ b/deps/ncrypto/ncrypto.cc @@ -1346,8 +1346,11 @@ DHPointer DHPointer::New(BignumPointer&& p, BignumPointer&& g) { if (DH_set0_pqg(dh.get(), p.get(), nullptr, g.get()) != 1) return {}; // If the call above is successful, the DH object takes ownership of the - // BIGNUMs, so we must release them here. + // BIGNUMs, so we must release them here. Unfortunately coverity does not + // know that so we need to tell it not to complain. + // coverity[resource_leak] p.release(); + // coverity[resource_leak] g.release(); return dh; @@ -1430,7 +1433,10 @@ DataPointer DHPointer::generateKeys() const { size_t DHPointer::size() const { if (!dh_) return 0; - return DH_size(dh_.get()); + int ret = DH_size(dh_.get()); + // DH_size can return a -1 on error but we just want to return a 0 + // in that case so we don't wrap around when returning the size_t. + return ret >= 0 ? static_cast(ret) : 0; } DataPointer DHPointer::computeSecret(const BignumPointer& peer) const { @@ -1459,6 +1465,10 @@ DataPointer DHPointer::computeSecret(const BignumPointer& peer) const { bool DHPointer::setPublicKey(BignumPointer&& key) { if (!dh_) return false; if (DH_set0_key(dh_.get(), key.get(), nullptr) == 1) { + // If DH_set0_key returns successfully, then dh_ takes ownership of the + // BIGNUM, so we must release it here. Unfortunately coverity does not + // know that so we need to tell it not to complain. + // coverity[resource_leak] key.release(); return true; } @@ -1468,6 +1478,10 @@ bool DHPointer::setPublicKey(BignumPointer&& key) { bool DHPointer::setPrivateKey(BignumPointer&& key) { if (!dh_) return false; if (DH_set0_key(dh_.get(), nullptr, key.get()) == 1) { + // If DH_set0_key returns successfully, then dh_ takes ownership of the + // BIGNUM, so we must release it here. Unfortunately coverity does not + // know that so we need to tell it not to complain. 
+ // coverity[resource_leak] key.release(); return true; } From 86d7ba09c4f63a96c5434060a2c0c187ecd8cd8f Mon Sep 17 00:00:00 2001 From: Meghan Denny Date: Sun, 19 Jan 2025 08:56:09 -0800 Subject: [PATCH 104/158] test: test-stream-compose.js doesn't need internals PR-URL: https://github.com/nodejs/node/pull/56619 Reviewed-By: Yagiz Nizipli Reviewed-By: Luigi Pinca Reviewed-By: Jake Yuesong Li Reviewed-By: James M Snell --- test/parallel/test-stream-compose.js | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/test/parallel/test-stream-compose.js b/test/parallel/test-stream-compose.js index 1ff8c39b7a2234..d7a54e177668a2 100644 --- a/test/parallel/test-stream-compose.js +++ b/test/parallel/test-stream-compose.js @@ -1,5 +1,3 @@ -// Flags: --expose-internals - 'use strict'; const common = require('../common'); @@ -9,9 +7,9 @@ const { Transform, Writable, finished, + compose, PassThrough } = require('stream'); -const compose = require('internal/streams/compose'); const assert = require('assert'); { From 42020456732425b36c747fa79f1caf60ace61155 Mon Sep 17 00:00:00 2001 From: islandryu Date: Sun, 29 Dec 2024 13:41:34 +0900 Subject: [PATCH 105/158] http2: omit server name when HTTP2 host is IP address Fixes: https://github.com/nodejs/node/issues/56189 PR-URL: https://github.com/nodejs/node/pull/56530 Reviewed-By: Matteo Collina Reviewed-By: Yongsheng Zhang Reviewed-By: Luigi Pinca --- lib/internal/http2/core.js | 24 ++++++---- test/parallel/test-http2-ip-address-host.js | 53 +++++++++++++++++++++ 2 files changed, 68 insertions(+), 9 deletions(-) create mode 100644 test/parallel/test-http2-ip-address-host.js diff --git a/lib/internal/http2/core.js b/lib/internal/http2/core.js index 0fdf516baafd74..554221ac614636 100644 --- a/lib/internal/http2/core.js +++ b/lib/internal/http2/core.js @@ -645,15 +645,21 @@ function initOriginSet(session) { if (originSet === undefined) { const socket = session[kSocket]; session[kState].originSet = originSet = new SafeSet(); - if (socket.servername != null) { - let originString = `https://${socket.servername}`; - if (socket.remotePort != null) - originString += `:${socket.remotePort}`; - // We have to ensure that it is a properly serialized - // ASCII origin string. The socket.servername might not - // be properly ASCII encoded. - originSet.add(getURLOrigin(originString)); + let hostName = socket.servername; + if (hostName === null || hostName === false) { + if (socket.remoteFamily === 'IPv6') { + hostName = `[${socket.remoteAddress}]`; + } else { + hostName = socket.remoteAddress; + } } + let originString = `https://${hostName}`; + if (socket.remotePort != null) + originString += `:${socket.remotePort}`; + // We have to ensure that it is a properly serialized + // ASCII origin string. The socket.servername might not + // be properly ASCII encoded. + originSet.add(getURLOrigin(originString)); } return originSet; } @@ -3342,7 +3348,7 @@ function connect(authority, options, listener) { socket = net.connect({ port, host, ...options }); break; case 'https:': - socket = tls.connect(port, host, initializeTLSOptions(options, host)); + socket = tls.connect(port, host, initializeTLSOptions(options, net.isIP(host) ? 
undefined : host)); break; default: throw new ERR_HTTP2_UNSUPPORTED_PROTOCOL(protocol); diff --git a/test/parallel/test-http2-ip-address-host.js b/test/parallel/test-http2-ip-address-host.js new file mode 100644 index 00000000000000..c0699a89169153 --- /dev/null +++ b/test/parallel/test-http2-ip-address-host.js @@ -0,0 +1,53 @@ +'use strict'; + +const common = require('../common'); +if (!common.hasCrypto) { common.skip('missing crypto'); }; +const assert = require('assert'); +const fixtures = require('../common/fixtures'); +const h2 = require('http2'); + +function loadKey(keyname) { + return fixtures.readKey(keyname, 'binary'); +} + +const key = loadKey('agent8-key.pem'); +const cert = fixtures.readKey('agent8-cert.pem'); + +const server = h2.createSecureServer({ key, cert }); +const hasIPv6 = common.hasIPv6; +const testCount = hasIPv6 ? 2 : 1; + +server.on('stream', common.mustCall((stream) => { + const session = stream.session; + assert.strictEqual(session.servername, undefined); + stream.respond({ 'content-type': 'application/json' }); + stream.end(JSON.stringify({ + servername: session.servername, + originSet: session.originSet + }) + ); +}, testCount)); + +let done = 0; + +server.listen(0, common.mustCall(() => { + function handleRequest(url) { + const client = h2.connect(url, + { rejectUnauthorized: false }); + const req = client.request(); + let data = ''; + req.setEncoding('utf8'); + req.on('data', (d) => data += d); + req.on('end', common.mustCall(() => { + const originSet = req.session.originSet; + assert.strictEqual(originSet[0], url); + client.close(); + if (++done === testCount) server.close(); + })); + } + + const ipv4Url = `https://127.0.0.1:${server.address().port}`; + const ipv6Url = `https://[::1]:${server.address().port}`; + handleRequest(ipv4Url); + if (hasIPv6) handleRequest(ipv6Url); +})); From 57a7b931fb88375bd604f27ed13745a610184a50 Mon Sep 17 00:00:00 2001 From: Shreyans Pathak Date: Mon, 20 Jan 2025 15:18:21 -0500 Subject: [PATCH 106/158] doc: `WeakSet` and `WeakMap` comparison details PR-URL: https://github.com/nodejs/node/pull/56648 Reviewed-By: Luigi Pinca Reviewed-By: James M Snell --- doc/api/assert.md | 84 +++++++++++++++++++++++++++++++++-------------- 1 file changed, 60 insertions(+), 24 deletions(-) diff --git a/doc/api/assert.md b/doc/api/assert.md index 444815e0041432..2a317c14dae739 100644 --- a/doc/api/assert.md +++ b/doc/api/assert.md @@ -804,8 +804,10 @@ are recursively evaluated also by the following rules. * [`Map`][] keys and [`Set`][] items are compared unordered. * Recursion stops when both sides differ or both sides encounter a circular reference. -* [`WeakMap`][] and [`WeakSet`][] comparison does not rely on their values. See - below for further details. +* [`WeakMap`][] and [`WeakSet`][] instances are **not** compared structurally. + They are only equal if they reference the same object. Any comparison between + different `WeakMap` or `WeakSet` instances will result in inequality, + even if they contain the same entries. * [`RegExp`][] lastIndex, flags, and source are always compared, even if these are not enumerable properties. 
@@ -882,23 +884,40 @@ assert.deepStrictEqual({ [symbol1]: 1 }, { [symbol2]: 1 }); // } const weakMap1 = new WeakMap(); -const weakMap2 = new WeakMap([[{}, {}]]); -const weakMap3 = new WeakMap(); -weakMap3.unequal = true; +const weakMap2 = new WeakMap(); +const obj = {}; +weakMap1.set(obj, 'value'); +weakMap2.set(obj, 'value'); + +// Comparing different instances fails, even with same contents assert.deepStrictEqual(weakMap1, weakMap2); -// OK, because it is impossible to compare the entries +// AssertionError: Values have same structure but are not reference-equal: +// +// WeakMap { +// +// } + +// Comparing the same instance to itself succeeds +assert.deepStrictEqual(weakMap1, weakMap1); +// OK -// Fails because weakMap3 has a property that weakMap1 does not contain: -assert.deepStrictEqual(weakMap1, weakMap3); +const weakSet1 = new WeakSet(); +const weakSet2 = new WeakSet(); +weakSet1.add(obj); +weakSet2.add(obj); + +// Comparing different instances fails, even with same contents +assert.deepStrictEqual(weakSet1, weakSet2); // AssertionError: Expected inputs to be strictly deep-equal: // + actual - expected // -// WeakMap { -// + [items unknown] -// - [items unknown], -// - unequal: true -// } +// + WeakSet { } +// - WeakSet { } + +// Comparing the same instance to itself succeeds +assert.deepStrictEqual(weakSet1, weakSet1); +// OK ``` ```cjs @@ -974,23 +993,40 @@ assert.deepStrictEqual({ [symbol1]: 1 }, { [symbol2]: 1 }); // } const weakMap1 = new WeakMap(); -const weakMap2 = new WeakMap([[{}, {}]]); -const weakMap3 = new WeakMap(); -weakMap3.unequal = true; +const weakMap2 = new WeakMap(); +const obj = {}; +weakMap1.set(obj, 'value'); +weakMap2.set(obj, 'value'); + +// Comparing different instances fails, even with same contents assert.deepStrictEqual(weakMap1, weakMap2); -// OK, because it is impossible to compare the entries +// AssertionError: Values have same structure but are not reference-equal: +// +// WeakMap { +// +// } + +// Comparing the same instance to itself succeeds +assert.deepStrictEqual(weakMap1, weakMap1); +// OK -// Fails because weakMap3 has a property that weakMap1 does not contain: -assert.deepStrictEqual(weakMap1, weakMap3); +const weakSet1 = new WeakSet(); +const weakSet2 = new WeakSet(); +weakSet1.add(obj); +weakSet2.add(obj); + +// Comparing different instances fails, even with same contents +assert.deepStrictEqual(weakSet1, weakSet2); // AssertionError: Expected inputs to be strictly deep-equal: // + actual - expected // -// WeakMap { -// + [items unknown] -// - [items unknown], -// - unequal: true -// } +// + WeakSet { } +// - WeakSet { } + +// Comparing the same instance to itself succeeds +assert.deepStrictEqual(weakSet1, weakSet1); +// OK ``` If the values are not equal, an [`AssertionError`][] is thrown with a `message` From e1e1200b7917c1a2adc666844ddef7486a01fc0e Mon Sep 17 00:00:00 2001 From: Dario Piotrowicz Date: Tue, 21 Jan 2025 17:16:17 +0000 Subject: [PATCH 107/158] doc: clarify cjs/esm diff in `queueMicrotask()` vs `process.nextTick()` the section comparing `queueMicrotask()` and `process.nextTick()` doesn't address the different scheduling behavior that the two functions have in cjs and esm modules, the section's introductory mjs example also provides an incorrect output, the changes here address such by explaining the difference between the two module types and updating the example accordingly PR-URL: https://github.com/nodejs/node/pull/56659 Fixes: https://github.com/nodejs/node/issues/45048 Reviewed-By: Yagiz 
Nizipli Reviewed-By: James M Snell Reviewed-By: Luigi Pinca --- doc/api/process.md | 38 ++++++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/doc/api/process.md b/doc/api/process.md index 020590868fafe5..a4d83c5246f828 100644 --- a/doc/api/process.md +++ b/doc/api/process.md @@ -3006,34 +3006,40 @@ function definitelyAsync(arg, cb) { ### When to use `queueMicrotask()` vs. `process.nextTick()` -The [`queueMicrotask()`][] API is an alternative to `process.nextTick()` that -also defers execution of a function using the same microtask queue used to -execute the then, catch, and finally handlers of resolved promises. Within -Node.js, every time the "next tick queue" is drained, the microtask queue +The [`queueMicrotask()`][] API is an alternative to `process.nextTick()` that instead of using the +"next tick queue" defers execution of a function using the same microtask queue used to execute the +then, catch, and finally handlers of resolved promises. + +Within Node.js, every time the "next tick queue" is drained, the microtask queue is drained immediately after. +So in CJS modules `process.nextTick()` callbacks are always run before `queueMicrotask()` ones. +However since ESM modules are processed already as part of the microtask queue, there +`queueMicrotask()` callbacks are always exectued before `process.nextTick()` ones since Node.js +is already in the process of draining the microtask queue. + ```mjs import { nextTick } from 'node:process'; -Promise.resolve().then(() => console.log(2)); -queueMicrotask(() => console.log(3)); -nextTick(() => console.log(1)); +Promise.resolve().then(() => console.log('resolve')); +queueMicrotask(() => console.log('microtask')); +nextTick(() => console.log('nextTick')); // Output: -// 1 -// 2 -// 3 +// resolve +// microtask +// nextTick ``` ```cjs const { nextTick } = require('node:process'); -Promise.resolve().then(() => console.log(2)); -queueMicrotask(() => console.log(3)); -nextTick(() => console.log(1)); +Promise.resolve().then(() => console.log('resolve')); +queueMicrotask(() => console.log('microtask')); +nextTick(() => console.log('nextTick')); // Output: -// 1 -// 2 -// 3 +// nextTick +// resolve +// microtask ``` For _most_ userland use cases, the `queueMicrotask()` API provides a portable From 02f36ca11bb77c3651efba4c82dad5d2895fc122 Mon Sep 17 00:00:00 2001 From: Antoine du Hamel Date: Wed, 22 Jan 2025 13:54:31 +0100 Subject: [PATCH 108/158] tools: do not throw on missing `create-release-proposal.sh` PR-URL: https://github.com/nodejs/node/pull/56695 Reviewed-By: Marco Ippolito Reviewed-By: Pietro Marchini --- .github/workflows/create-release-proposal.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/create-release-proposal.yml b/.github/workflows/create-release-proposal.yml index 55bc892eb909a4..33426bdcda4a5b 100644 --- a/.github/workflows/create-release-proposal.yml +++ b/.github/workflows/create-release-proposal.yml @@ -71,9 +71,11 @@ jobs: git config --local user.name "Node.js GitHub Bot" - name: Start git node release prepare + # `git update-index` tells git to ignore future changes to the `.sh` file, + # `|| true` is there to ignore the error if such file doesn't exist yet. # The curl command is to make sure we run the version of the script corresponding to the current workflow. 
run: | - git update-index --assume-unchanged tools/actions/create-release-proposal.sh + git update-index --assume-unchanged tools/actions/create-release-proposal.sh || true curl -fsSLo tools/actions/create-release-proposal.sh https://github.com/${GITHUB_REPOSITORY}/raw/${GITHUB_SHA}/tools/actions/create-release-proposal.sh ./tools/actions/create-release-proposal.sh "${RELEASE_DATE}" "${RELEASE_LINE}" "${GITHUB_ACTOR}" env: From eb3148fb5c51041ab2025f03b71497d4ecc7b69a Mon Sep 17 00:00:00 2001 From: Rafael Gonzaga Date: Wed, 22 Jan 2025 12:09:00 -0300 Subject: [PATCH 109/158] test: use --permission instead of --experimental-permission PR-URL: https://github.com/nodejs/node/pull/56685 Reviewed-By: Marco Ippolito Reviewed-By: Colin Ihrig Reviewed-By: Pietro Marchini Reviewed-By: LiviaMedeiros Reviewed-By: Luigi Pinca Reviewed-By: Ruben Bridgewater --- test/parallel/test-permission-dc-worker-threads.js | 2 +- test/parallel/test-runner-module-mocking.js | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/test/parallel/test-permission-dc-worker-threads.js b/test/parallel/test-permission-dc-worker-threads.js index 73cbf02981fa88..4fdb566f9e1701 100644 --- a/test/parallel/test-permission-dc-worker-threads.js +++ b/test/parallel/test-permission-dc-worker-threads.js @@ -1,4 +1,4 @@ -// Flags: --experimental-permission --allow-fs-read=* --experimental-test-module-mocks +// Flags: --permission --allow-fs-read=* --experimental-test-module-mocks 'use strict'; const common = require('../common'); diff --git a/test/parallel/test-runner-module-mocking.js b/test/parallel/test-runner-module-mocking.js index 7e9e49eefeb58e..cb40df98147302 100644 --- a/test/parallel/test-runner-module-mocking.js +++ b/test/parallel/test-runner-module-mocking.js @@ -655,7 +655,7 @@ test('should throw ERR_ACCESS_DENIED when permission model is enabled', async (t const cwd = fixtures.path('test-runner'); const fixture = fixtures.path('test-runner', 'mock-nm.js'); const args = [ - '--experimental-permission', + '--permission', '--allow-fs-read=*', '--experimental-test-module-mocks', fixture, @@ -674,7 +674,7 @@ test('should work when --allow-worker is passed and permission model is enabled' const cwd = fixtures.path('test-runner'); const fixture = fixtures.path('test-runner', 'mock-nm.js'); const args = [ - '--experimental-permission', + '--permission', '--allow-fs-read=*', '--allow-worker', '--experimental-test-module-mocks', From 7347d34053a201c9bfc45659959938e5dfe33149 Mon Sep 17 00:00:00 2001 From: Vitalii Akimov Date: Sun, 29 Dec 2024 20:36:54 +0000 Subject: [PATCH 110/158] module: fixing url change in load sync hook chain Fixes: https://github.com/nodejs/node/issues/56376 PR-URL: https://github.com/nodejs/node/pull/56402 Reviewed-By: Jacob Smith Reviewed-By: Antoine du Hamel Reviewed-By: Joyee Cheung Reviewed-By: Matteo Collina Reviewed-By: James M Snell --- lib/internal/modules/cjs/loader.js | 2 +- ...st-module-hooks-load-url-change-import.mjs | 25 ++++++++++++++++ ...st-module-hooks-load-url-change-require.js | 30 +++++++++++++++++++ 3 files changed, 56 insertions(+), 1 deletion(-) create mode 100644 test/module-hooks/test-module-hooks-load-url-change-import.mjs create mode 100644 test/module-hooks/test-module-hooks-load-url-change-require.js diff --git a/lib/internal/modules/cjs/loader.js b/lib/internal/modules/cjs/loader.js index 0bc421e172303b..a558185e08ddb1 100644 --- a/lib/internal/modules/cjs/loader.js +++ b/lib/internal/modules/cjs/loader.js 
@@ -1144,7 +1144,7 @@ function getDefaultLoad(url, filename) { return function defaultLoad(urlFromHook, context) { // If the url is the same as the original one, save the conversion. const isLoadingOriginalModule = (urlFromHook === url); - const filenameFromHook = isLoadingOriginalModule ? filename : convertURLToCJSFilename(url); + const filenameFromHook = isLoadingOriginalModule ? filename : convertURLToCJSFilename(urlFromHook); const source = defaultLoadImpl(filenameFromHook, context.format); // Format from context is directly returned, because format detection should only be // done after the entire load chain is completed. diff --git a/test/module-hooks/test-module-hooks-load-url-change-import.mjs b/test/module-hooks/test-module-hooks-load-url-change-import.mjs new file mode 100644 index 00000000000000..a65975ab1fad16 --- /dev/null +++ b/test/module-hooks/test-module-hooks-load-url-change-import.mjs @@ -0,0 +1,25 @@ +import { mustCall } from '../common/index.mjs'; +import assert from 'node:assert'; +import { registerHooks } from 'node:module'; +import { fileURL } from '../common/fixtures.mjs'; + +// This tests shows the url parameter in `load` can be changed in the `nextLoad` call +// It changes `foo` package name into `redirected-fs` and then loads `redirected-fs` + +const hook = registerHooks({ + resolve(specifier, context, nextResolve) { + assert.strictEqual(specifier, 'foo'); + return { + url: 'foo://bar', + shortCircuit: true, + }; + }, + load: mustCall(function load(url, context, nextLoad) { + assert.strictEqual(url, 'foo://bar'); + return nextLoad(fileURL('module-hooks', 'redirected-fs.js').href, context); + }), +}); + +assert.strictEqual((await import('foo')).exports_for_test, 'redirected fs'); + +hook.deregister(); diff --git a/test/module-hooks/test-module-hooks-load-url-change-require.js b/test/module-hooks/test-module-hooks-load-url-change-require.js new file mode 100644 index 00000000000000..7f10110cda8c50 --- /dev/null +++ b/test/module-hooks/test-module-hooks-load-url-change-require.js @@ -0,0 +1,30 @@ +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const { registerHooks } = require('module'); +const fixtures = require('../common/fixtures'); + +// This tests shows the url parameter in `load` can be changed in the `nextLoad` call +// It changes `foo` package name into `redirected-fs` and then loads `redirected-fs` + +const hook = registerHooks({ + resolve(specifier, context, nextResolve) { + assert.strictEqual(specifier, 'foo'); + return { + url: 'foo://bar', + shortCircuit: true, + }; + }, + load: common.mustCall(function load(url, context, nextLoad) { + assert.strictEqual(url, 'foo://bar'); + return nextLoad( + fixtures.fileURL('module-hooks', 'redirected-fs.js').href, + context, + ); + }), +}); + +assert.strictEqual(require('foo').exports_for_test, 'redirected fs'); + +hook.deregister(); From 538e19489fedf1882c3c668dec532e0b1fc78efe Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Thu, 23 Jan 2025 09:36:52 +0100 Subject: [PATCH 111/158] worker: refactor stdio to improve performance Signed-off-by: Matteo Collina PR-URL: https://github.com/nodejs/node/pull/56630 Reviewed-By: Yagiz Nizipli Reviewed-By: Robert Nagy Reviewed-By: Paolo Insogna Reviewed-By: Luigi Pinca --- lib/internal/worker.js | 6 ++++-- lib/internal/worker/io.js | 35 ++++++++++++++++++++++------------- 2 files changed, 26 insertions(+), 15 deletions(-) diff --git a/lib/internal/worker.js b/lib/internal/worker.js index 
dcc9651802c4e4..e5a2cd06892c75 100644 --- a/lib/internal/worker.js +++ b/lib/internal/worker.js @@ -340,9 +340,11 @@ class Worker extends EventEmitter { { const { stream, chunks } = message; const readable = this[kParentSideStdio][stream]; - ArrayPrototypeForEach(chunks, ({ chunk, encoding }) => { + // This is a hot path, use a for(;;) loop + for (let i = 0; i < chunks.length; i++) { + const { chunk, encoding } = chunks[i]; readable.push(chunk, encoding); - }); + } return; } case messageTypes.STDIO_WANTS_MORE_DATA: diff --git a/lib/internal/worker/io.js b/lib/internal/worker/io.js index 2b28c6a2487b11..29c7914982b67a 100644 --- a/lib/internal/worker/io.js +++ b/lib/internal/worker/io.js @@ -1,9 +1,7 @@ 'use strict'; const { - ArrayPrototypeForEach, - ArrayPrototypeMap, - ArrayPrototypePush, + Array, FunctionPrototypeBind, FunctionPrototypeCall, ObjectAssign, @@ -77,7 +75,7 @@ const kOnMessage = Symbol('kOnMessage'); const kOnMessageError = Symbol('kOnMessageError'); const kPort = Symbol('kPort'); const kWaitingStreams = Symbol('kWaitingStreams'); -const kWritableCallbacks = Symbol('kWritableCallbacks'); +const kWritableCallback = Symbol('kWritableCallback'); const kStartedReading = Symbol('kStartedReading'); const kStdioWantsMoreDataCallback = Symbol('kStdioWantsMoreDataCallback'); const kCurrentlyReceivingPorts = @@ -282,20 +280,29 @@ class WritableWorkerStdio extends Writable { super({ decodeStrings: false }); this[kPort] = port; this[kName] = name; - this[kWritableCallbacks] = []; + this[kWritableCallback] = null; } _writev(chunks, cb) { + const toSend = new Array(chunks.length); + + // We avoid .map() because it's a hot path + for (let i = 0; i < chunks.length; i++) { + const { chunk, encoding } = chunks[i]; + toSend[i] = { chunk, encoding }; + } + this[kPort].postMessage({ type: messageTypes.STDIO_PAYLOAD, stream: this[kName], - chunks: ArrayPrototypeMap(chunks, - ({ chunk, encoding }) => ({ chunk, encoding })), + chunks: toSend, }); if (process._exiting) { cb(); } else { - ArrayPrototypePush(this[kWritableCallbacks], cb); + // Only one writev happens at any given time, + // so we can safely overwrite the callback. 
+ this[kWritableCallback] = cb; if (this[kPort][kWaitingStreams]++ === 0) this[kPort].ref(); } @@ -311,11 +318,13 @@ class WritableWorkerStdio extends Writable { } [kStdioWantsMoreDataCallback]() { - const cbs = this[kWritableCallbacks]; - this[kWritableCallbacks] = []; - ArrayPrototypeForEach(cbs, (cb) => cb()); - if ((this[kPort][kWaitingStreams] -= cbs.length) === 0) - this[kPort].unref(); + const cb = this[kWritableCallback]; + if (cb) { + this[kWritableCallback] = null; + cb(); + if (--this[kPort][kWaitingStreams] === 0) + this[kPort].unref(); + } } } From 9f99a6acb59f8a175a3418d5fc4e9eacde43ebc9 Mon Sep 17 00:00:00 2001 From: Antoine du Hamel Date: Thu, 23 Jan 2025 16:17:31 +0100 Subject: [PATCH 112/158] module: use more defensive code when handling SWC errors PR-URL: https://github.com/nodejs/node/pull/56646 Reviewed-By: Marco Ippolito Reviewed-By: James M Snell --- lib/internal/modules/typescript.js | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/lib/internal/modules/typescript.js b/lib/internal/modules/typescript.js index 689788b09853c4..6abfc707657b92 100644 --- a/lib/internal/modules/typescript.js +++ b/lib/internal/modules/typescript.js @@ -1,5 +1,8 @@ 'use strict'; +const { + ObjectPrototypeHasOwnProperty, +} = primordials; const { validateBoolean, validateOneOf, @@ -12,7 +15,6 @@ const { assertTypeScript, isUnderNodeModules, kEmptyObject } = require('internal/util'); const { - ERR_INTERNAL_ASSERTION, ERR_INVALID_TYPESCRIPT_SYNTAX, ERR_UNSUPPORTED_NODE_MODULES_TYPE_STRIPPING, ERR_UNSUPPORTED_TYPESCRIPT_SYNTAX, @@ -55,15 +57,16 @@ function parseTypeScript(source, options) { * Amaro v0.3.0 (from SWC v1.10.7) throws an object with `message` and `code` properties. * It allows us to distinguish between invalid syntax and unsupported syntax. */ - switch (error.code) { + switch (error?.code) { case 'UnsupportedSyntax': throw new ERR_UNSUPPORTED_TYPESCRIPT_SYNTAX(error.message); case 'InvalidSyntax': throw new ERR_INVALID_TYPESCRIPT_SYNTAX(error.message); default: - // SWC will throw strings when something goes wrong. - // Check if has the `message` property or treat it as a string. - throw new ERR_INTERNAL_ASSERTION(error.message ?? error); + // SWC may throw strings when something goes wrong. 
+ if (typeof error === 'string') { assert.fail(error); } + assert(error != null && ObjectPrototypeHasOwnProperty(error, 'message')); + assert.fail(error.message); } } } From 4ade128184310f20a646a5531e28923f2adc97e5 Mon Sep 17 00:00:00 2001 From: Rafael Gonzaga Date: Thu, 23 Jan 2025 16:05:11 -0300 Subject: [PATCH 113/158] doc: add RafaelGSS as latest sec release stewards MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PR-URL: https://github.com/nodejs/node/pull/56682 Refs: https://github.com/nodejs-private/security-release/pull/41 Reviewed-By: Richard Lau Reviewed-By: Ulises Gascón Reviewed-By: Marco Ippolito Reviewed-By: Matteo Collina Reviewed-By: Antoine du Hamel --- doc/contributing/security-release-process.md | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/contributing/security-release-process.md b/doc/contributing/security-release-process.md index d8a871bd96922c..7027b16a00d5e5 100644 --- a/doc/contributing/security-release-process.md +++ b/doc/contributing/security-release-process.md @@ -35,6 +35,7 @@ The current security stewards are documented in the main Node.js | NodeSource | Rafael | 2024-Apr-03 | | NodeSource | Rafael | 2024-Apr-10 | | NodeSource | Rafael | 2024-Jul-08 | +| NodeSource | Rafael | 2025-Jan-21 | | Datadog | Bryan | | | IBM | Joe | | | Platformatic | Matteo | | From 8a61aaa734364a286296f10eb15ce7990d1b439a Mon Sep 17 00:00:00 2001 From: Shreyans Pathak Date: Thu, 23 Jan 2025 14:24:28 -0500 Subject: [PATCH 114/158] doc: fix inconsistencies in `WeakSet` and `WeakMap` comparison details MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PR-URL: https://github.com/nodejs/node/pull/56683 Reviewed-By: Ruben Bridgewater Reviewed-By: Ulises Gascón Reviewed-By: Jason Zhang Reviewed-By: Luigi Pinca --- doc/api/assert.md | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/doc/api/assert.md b/doc/api/assert.md index 2a317c14dae739..6ee9d2f78fa740 100644 --- a/doc/api/assert.md +++ b/doc/api/assert.md @@ -909,11 +909,12 @@ weakSet2.add(obj); // Comparing different instances fails, even with same contents assert.deepStrictEqual(weakSet1, weakSet2); -// AssertionError: Expected inputs to be strictly deep-equal: +// AssertionError: Values have same structure but are not reference-equal: // + actual - expected // -// + WeakSet { } -// - WeakSet { } +// WeakSet { +// +// } // Comparing the same instance to itself succeeds assert.deepStrictEqual(weakSet1, weakSet1); @@ -1018,11 +1019,12 @@ weakSet2.add(obj); // Comparing different instances fails, even with same contents assert.deepStrictEqual(weakSet1, weakSet2); -// AssertionError: Expected inputs to be strictly deep-equal: +// AssertionError: Values have same structure but are not reference-equal: // + actual - expected // -// + WeakSet { } -// - WeakSet { } +// WeakSet { +// +// } // Comparing the same instance to itself succeeds assert.deepStrictEqual(weakSet1, weakSet1); From 69cb44e315266be7530d80a56933ea6063ca163f Mon Sep 17 00:00:00 2001 From: Daniel Lemire Date: Thu, 23 Jan 2025 14:24:38 -0500 Subject: [PATCH 115/158] tools: fix tools-deps-update MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PR-URL: https://github.com/nodejs/node/pull/56684 Reviewed-By: Antoine du Hamel Reviewed-By: James M Snell Reviewed-By: Michaël Zasso Reviewed-By: Michael Dawson --- .github/workflows/tools.yml | 14 ++++++++------ 1 file 
changed, 8 insertions(+), 6 deletions(-) diff --git a/.github/workflows/tools.yml b/.github/workflows/tools.yml index abbdc2824817f2..b768638d9d9cbd 100644 --- a/.github/workflows/tools.yml +++ b/.github/workflows/tools.yml @@ -284,6 +284,10 @@ jobs: tail -n1 temp-output | grep "NEW_VERSION=" >> "$GITHUB_ENV" || true rm temp-output steps: + - name: Setup Git config + run: | + git config --global user.name "Node.js GitHub Bot" + git config --global user.email "github-bot@iojs.org" - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 if: github.event_name == 'schedule' || inputs.id == 'all' || inputs.id == matrix.id with: @@ -301,17 +305,15 @@ jobs: if: env.COMMIT_MSG == '' && (github.event_name == 'schedule' || inputs.id == 'all' || inputs.id == matrix.id) run: | echo "COMMIT_MSG=${{ matrix.subsystem }}: update ${{ matrix.id }} to ${{ env.NEW_VERSION }}" >> "$GITHUB_ENV" - - uses: gr2m/create-or-update-pull-request-action@86ec1766034c8173518f61d2075cc2a173fb8c97 # v1.9.4 + - uses: peter-evans/create-pull-request@5e914681df9dc83aa4e4905692ca88beb2f9e91f # v7.0.5 if: github.event_name == 'schedule' || inputs.id == 'all' || inputs.id == matrix.id # Creates a PR or update the Action's existing PR, or # no-op if the base branch is already up-to-date. - env: - GITHUB_TOKEN: ${{ secrets.GH_USER_TOKEN }} with: - author: Node.js GitHub Bot - body: This is an automated update of ${{ matrix.id }} to ${{ env.NEW_VERSION }}. + token: ${{ secrets.GH_USER_TOKEN }} branch: actions/tools-update-${{ matrix.id }} # Custom branch *just* for this Action. + delete-branch: true commit-message: ${{ env.COMMIT_MSG }} labels: ${{ matrix.label }} title: '${{ matrix.subsystem }}: update ${{ matrix.id }} to ${{ env.NEW_VERSION }}' - update-pull-request-title-and-body: true + body: This is an automated update of ${{ matrix.id }} to ${{ env.NEW_VERSION }}. 
From f885496d9c22512ce44ccfe359128c049239ae62 Mon Sep 17 00:00:00 2001 From: Antoine du Hamel Date: Thu, 23 Jan 2025 23:14:10 +0100 Subject: [PATCH 116/158] test: fix localization data for ICU 74.2 PR-URL: https://github.com/nodejs/node/pull/56661 Refs: https://github.com/nodejs/node/pull/55618 Reviewed-By: James M Snell Reviewed-By: Luigi Pinca Reviewed-By: LiviaMedeiros Reviewed-By: Ruben Bridgewater Reviewed-By: Michael Dawson --- test/fixtures/icu/localizationData-v74.2.json | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test/fixtures/icu/localizationData-v74.2.json b/test/fixtures/icu/localizationData-v74.2.json index 65671ba5acb299..1cca79672ac25e 100644 --- a/test/fixtures/icu/localizationData-v74.2.json +++ b/test/fixtures/icu/localizationData-v74.2.json @@ -20,14 +20,14 @@ "dateTimeFormats": { "en": "7/25/1980, 1:35:33 AM", "zh": "1980/7/25 01:35:33", - "hi": "25/7/1980, 1:35:33 am", + "hi": "25/7/1980, पू 1:35:33", "es": "25/7/1980, 1:35:33", "fr": "25/07/1980 01:35:33", - "ar": "٢٥‏/٧‏/١٩٨٠، ١:٣٥:٣٣ ص", + "ar": "25‏/7‏/1980، 1:35:33 ص", "bn": "২৫/৭/১৯৮০, ১:৩৫:৩৩ AM", "ru": "25.07.1980, 01:35:33", "pt": "25/07/1980, 01:35:33", - "ur": "25/7/1980، 1:35:33 AM", + "ur": "25/7/1980، 1:35:33 ق.د.", "id": "25/7/1980, 01.35.33", "de": "25.7.1980, 01:35:33", "ja": "1980/7/25 1:35:33", @@ -41,7 +41,7 @@ "hi": "25/7/1980", "es": "25/7/1980", "fr": "25/07/1980", - "ar": "٢٥‏/٧‏/١٩٨٠", + "ar": "25‏/7‏/1980", "bn": "২৫/৭/১৯৮০", "ru": "25.07.1980", "pt": "25/07/1980", @@ -77,7 +77,7 @@ "hi": "2,75,760.913", "es": "275.760,913", "fr": "275 760,913", - "ar": "٢٧٥٬٧٦٠٫٩١٣", + "ar": "275,760.913", "bn": "২,৭৫,৭৬০.৯১৩", "ru": "275 760,913", "pt": "275.760,913", @@ -113,7 +113,7 @@ "hi": "5,86,920.617 घंटे पहले", "es": "hace 586.920,617 horas", "fr": "il y a 586 920,617 heures", - "ar": "قبل ٥٨٦٬٩٢٠٫٦١٧ ساعة", + "ar": "قبل 586,920.617 ساعة", "bn": "৫,৮৬,৯২০.৬১৭ ঘন্টা আগে", "ru": "586 920,617 часа назад", "pt": "há 586.920,617 horas", From 2fb007fdce451c2aa08d37f84a145ddfecd1f94f Mon Sep 17 00:00:00 2001 From: Chengzhong Wu Date: Thu, 23 Jan 2025 22:44:10 +0000 Subject: [PATCH 117/158] lib: allow skipping source maps in node_modules MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Files in `node_modules` are not authored by the user directly and the original sources are less relevant to the user. Skipping source maps in `node_modules` improves the general performance. Add `module.setSourceMapsSupport(enabled, options)` to skip source maps in `node_modules` if it is needed. This moves all source maps related API to `node:module` and this a step to promote the source maps API to stable. 
PR-URL: https://github.com/nodejs/node/pull/56639 Reviewed-By: Vinícius Lourenço Claro Cardoso Reviewed-By: Joyee Cheung Reviewed-By: Matteo Collina Reviewed-By: Antoine du Hamel --- benchmark/es/error-stack.js | 26 ++++++-- .../error-stack/simple-error-stack.js | 16 +++++ .../error-stack/simple-error-stack.ts | 19 ++++++ doc/api/module.md | 40 ++++++++++++ doc/api/process.md | 9 ++- lib/internal/bootstrap/node.js | 17 +++-- lib/internal/modules/esm/module_job.js | 4 +- lib/internal/process/pre_execution.js | 12 +++- lib/internal/source_map/source_map_cache.js | 65 +++++++++++++------ lib/module.js | 18 +++-- .../error-stack/enclosing-call-site-min.js | 3 + .../error-stack/enclosing-call-site.js | 27 ++++++++ .../error-stack/enclosing-call-site.js.map | 8 +++ .../output/source_map_disabled_by_api.js | 26 ++++++-- .../source_map_disabled_by_process_api.js | 42 ++++++++++++ ...ource_map_disabled_by_process_api.snapshot | 12 ++++ .../output/source_map_enabled_by_api.js | 26 ++++++-- .../source_map_enabled_by_api_node_modules.js | 48 ++++++++++++++ ...e_map_enabled_by_api_node_modules.snapshot | 12 ++++ .../source_map_enabled_by_process_api.js | 39 +++++++++++ ...source_map_enabled_by_process_api.snapshot | 12 ++++ .../output/source_map_prepare_stack_trace.js | 12 +++- .../test-module-setsourcemapssupport.js | 43 ++++++++++++ test/parallel/test-node-output-sourcemaps.mjs | 3 + 24 files changed, 491 insertions(+), 48 deletions(-) create mode 100644 benchmark/fixtures/node_modules/error-stack/simple-error-stack.js create mode 100644 benchmark/fixtures/node_modules/error-stack/simple-error-stack.ts create mode 100644 test/fixtures/source-map/node_modules/error-stack/enclosing-call-site-min.js create mode 100644 test/fixtures/source-map/node_modules/error-stack/enclosing-call-site.js create mode 100644 test/fixtures/source-map/node_modules/error-stack/enclosing-call-site.js.map create mode 100644 test/fixtures/source-map/output/source_map_disabled_by_process_api.js create mode 100644 test/fixtures/source-map/output/source_map_disabled_by_process_api.snapshot create mode 100644 test/fixtures/source-map/output/source_map_enabled_by_api_node_modules.js create mode 100644 test/fixtures/source-map/output/source_map_enabled_by_api_node_modules.snapshot create mode 100644 test/fixtures/source-map/output/source_map_enabled_by_process_api.js create mode 100644 test/fixtures/source-map/output/source_map_enabled_by_process_api.snapshot create mode 100644 test/parallel/test-module-setsourcemapssupport.js diff --git a/benchmark/es/error-stack.js b/benchmark/es/error-stack.js index 907f308ea41558..3b373dcdae63c8 100644 --- a/benchmark/es/error-stack.js +++ b/benchmark/es/error-stack.js @@ -2,13 +2,19 @@ const common = require('../common.js'); const modPath = require.resolve('../fixtures/simple-error-stack.js'); +const nodeModulePath = require.resolve('../fixtures/node_modules/error-stack/simple-error-stack.js'); +const Module = require('node:module'); const bench = common.createBenchmark(main, { - method: ['without-sourcemap', 'sourcemap'], + method: [ + 'without-sourcemap', + 'sourcemap', + 'node-modules-without-sourcemap', + 'node-module-sourcemap'], n: [1e5], }); -function runN(n) { +function runN(n, modPath) { delete require.cache[modPath]; const mod = require(modPath); bench.start(); @@ -22,11 +28,23 @@ function main({ n, method }) { switch (method) { case 'without-sourcemap': process.setSourceMapsEnabled(false); - runN(n); + runN(n, modPath); break; case 'sourcemap': 
process.setSourceMapsEnabled(true); - runN(n); + runN(n, modPath); + break; + case 'node-modules-without-sourcemap': + Module.setSourceMapsSupport(true, { + nodeModules: false, + }); + runN(n, nodeModulePath); + break; + case 'node-modules-sourcemap': + Module.setSourceMapsSupport(true, { + nodeModules: true, + }); + runN(n, nodeModulePath); break; default: throw new Error(`Unexpected method "${method}"`); diff --git a/benchmark/fixtures/node_modules/error-stack/simple-error-stack.js b/benchmark/fixtures/node_modules/error-stack/simple-error-stack.js new file mode 100644 index 00000000000000..33c3ad7f324d40 --- /dev/null +++ b/benchmark/fixtures/node_modules/error-stack/simple-error-stack.js @@ -0,0 +1,16 @@ +'use strict'; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.simpleErrorStack = simpleErrorStack; +// Compile with `tsc --inlineSourceMap benchmark/fixtures/node_modules/error-stack/simple-error-stack.ts`. +var lorem = 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'; +function simpleErrorStack() { + [1].map(function () { + try { + lorem.BANG(); + } + catch (e) { + return e.stack; + } + }); +} +//# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoic2ltcGxlLWVycm9yLXN0YWNrLmpzIiwic291cmNlUm9vdCI6IiIsInNvdXJjZXMiOlsic2ltcGxlLWVycm9yLXN0YWNrLnRzIl0sIm5hbWVzIjpbXSwibWFwcGluZ3MiOiJBQUFBLFlBQVksQ0FBQzs7QUFpQlgsNENBQWdCO0FBZmxCLGlGQUFpRjtBQUVqRixJQUFNLEtBQUssR0FBRywrYkFBK2IsQ0FBQztBQUU5YyxTQUFTLGdCQUFnQjtJQUN2QixDQUFDLENBQUMsQ0FBQyxDQUFDLEdBQUcsQ0FBQztRQUNOLElBQUksQ0FBQztZQUNGLEtBQWEsQ0FBQyxJQUFJLEVBQUUsQ0FBQztRQUN4QixDQUFDO1FBQUMsT0FBTyxDQUFDLEVBQUUsQ0FBQztZQUNYLE9BQU8sQ0FBQyxDQUFDLEtBQUssQ0FBQztRQUNqQixDQUFDO0lBQ0gsQ0FBQyxDQUFDLENBQUE7QUFDSixDQUFDIn0= diff --git a/benchmark/fixtures/node_modules/error-stack/simple-error-stack.ts b/benchmark/fixtures/node_modules/error-stack/simple-error-stack.ts new file mode 100644 index 00000000000000..58d3d7eedd2f1b --- /dev/null +++ b/benchmark/fixtures/node_modules/error-stack/simple-error-stack.ts @@ -0,0 +1,19 @@ +'use strict'; + +// Compile with `tsc --inlineSourceMap benchmark/fixtures/node_modules/error-stack/simple-error-stack.ts`. + +const lorem = 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. 
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'; + +function simpleErrorStack() { + [1].map(() => { + try { + (lorem as any).BANG(); + } catch (e) { + return e.stack; + } + }) +} + +export { + simpleErrorStack, +}; diff --git a/doc/api/module.md b/doc/api/module.md index 6b9d6cf035575d..586a70329c0605 100644 --- a/doc/api/module.md +++ b/doc/api/module.md @@ -1590,6 +1590,20 @@ import { findSourceMap, SourceMap } from 'node:module'; const { findSourceMap, SourceMap } = require('node:module'); ``` +### `module.getSourceMapsSupport()` + + + +* Returns: {Object} + * `enabled` {boolean} If the source maps support is enabled + * `nodeModules` {boolean} If the support is enabled for files in `node_modules`. + * `generatedCode` {boolean} If the support is enabled for generated code from `eval` or `new Function`. + +This method returns whether the [Source Map v3][Source Map] support for stack +traces is enabled. + @@ -1609,6 +1623,31 @@ added: `path` is the resolved path for the file for which a corresponding source map should be fetched. +### `module.setSourceMapsSupport(enabled[, options])` + + + +* `enabled` {boolean} Enable the source map support. +* `options` {Object} Optional + * `nodeModules` {boolean} If enabling the support for files in + `node_modules`. **Default:** `false`. + * `generatedCode` {boolean} If enabling the support for generated code from + `eval` or `new Function`. **Default:** `false`. + +This function enables or disables the [Source Map v3][Source Map] support for +stack traces. + +It provides same features as launching Node.js process with commandline options +`--enable-source-maps`, with additional options to alter the support for files +in `node_modules` or generated codes. + +Only source maps in JavaScript files that are loaded after source maps has been +enabled will be parsed and loaded. Preferably, use the commandline options +`--enable-source-maps` to avoid losing track of source maps of modules loaded +before this API call. + ### Class: `module.SourceMap` -> Stability: 1 - Experimental +> Stability: 1 - Experimental: Use [`module.setSourceMapsSupport()`][] instead. * `val` {boolean} @@ -4000,6 +4000,9 @@ It provides same features as launching Node.js process with commandline options Only source maps in JavaScript files that are loaded after source maps has been enabled will be parsed and loaded. +This implies calling `module.setSourceMapsSupport()` with an option +`{ nodeModules: true, generatedCode: true }`. + ## `process.setUncaughtExceptionCaptureCallback(fn)` -> Stability: 1 - Experimental +> Stability: 1 - Experimental: Use [`module.getSourceMapsSupport()`][] instead. 
* {boolean} @@ -4501,7 +4504,9 @@ cases: [`console.error()`]: console.md#consoleerrordata-args [`console.log()`]: console.md#consolelogdata-args [`domain`]: domain.md +[`module.getSourceMapsSupport()`]: module.md#modulegetsourcemapssupport [`module.isBuiltin(id)`]: module.md#moduleisbuiltinmodulename +[`module.setSourceMapsSupport()`]: module.md#modulesetsourcemapssupportenabled-options [`net.Server`]: net.md#class-netserver [`net.Socket`]: net.md#class-netsocket [`os.constants.dlopen`]: os.md#dlopen-constants diff --git a/lib/internal/bootstrap/node.js b/lib/internal/bootstrap/node.js index de2f0e00e14092..5b24a44741b0d6 100644 --- a/lib/internal/bootstrap/node.js +++ b/lib/internal/bootstrap/node.js @@ -368,8 +368,8 @@ internalBinding('process_methods').setEmitWarningSync(emitWarningSync); { const { - getSourceMapsEnabled, - setSourceMapsEnabled, + getSourceMapsSupport, + setSourceMapsSupport, maybeCacheGeneratedSourceMap, } = require('internal/source_map/source_map_cache'); const { @@ -381,10 +381,19 @@ internalBinding('process_methods').setEmitWarningSync(emitWarningSync); enumerable: true, configurable: true, get() { - return getSourceMapsEnabled(); + return getSourceMapsSupport().enabled; }, }); - process.setSourceMapsEnabled = setSourceMapsEnabled; + process.setSourceMapsEnabled = function setSourceMapsEnabled(val) { + setSourceMapsSupport(val, { + __proto__: null, + // TODO(legendecas): In order to smoothly improve the source map support, + // skip source maps in node_modules and generated code with + // `process.setSourceMapsEnabled(true)` in a semver major version. + nodeModules: val, + generatedCode: val, + }); + }; // The C++ land calls back to maybeCacheGeneratedSourceMap() // when code is generated by user with eval() or new Function() // to cache the source maps from the evaluated code, if any. diff --git a/lib/internal/modules/esm/module_job.js b/lib/internal/modules/esm/module_job.js index 8039e2f57a500f..846a336d27547e 100644 --- a/lib/internal/modules/esm/module_job.js +++ b/lib/internal/modules/esm/module_job.js @@ -30,7 +30,7 @@ const { } = internalBinding('util'); const { decorateErrorStack, kEmptyObject } = require('internal/util'); const { - getSourceMapsEnabled, + getSourceMapsSupport, } = require('internal/source_map/source_map_cache'); const assert = require('internal/assert'); const resolvedPromise = PromiseResolve(); @@ -186,7 +186,7 @@ class ModuleJob extends ModuleJobBase { // of missing named export. This is currently not possible because // stack trace originates in module_job, not the file itself. A hidden // symbol with filename could be set in node_errors.cc to facilitate this. 
- if (!getSourceMapsEnabled() && + if (!getSourceMapsSupport().enabled && StringPrototypeIncludes(e.message, ' does not provide an export named')) { const splitStack = StringPrototypeSplit(e.stack, '\n'); diff --git a/lib/internal/process/pre_execution.js b/lib/internal/process/pre_execution.js index b3aba59674b82b..3d9ba5a1605912 100644 --- a/lib/internal/process/pre_execution.js +++ b/lib/internal/process/pre_execution.js @@ -608,9 +608,17 @@ function initializeESMLoader(forceDefaultLoader) { function initializeSourceMapsHandlers() { const { - setSourceMapsEnabled, + setSourceMapsSupport, } = require('internal/source_map/source_map_cache'); - setSourceMapsEnabled(getOptionValue('--enable-source-maps')); + const enabled = getOptionValue('--enable-source-maps'); + setSourceMapsSupport(enabled, { + __proto__: null, + // TODO(legendecas): In order to smoothly improve the source map support, + // skip source maps in node_modules and generated code with + // `--enable-source-maps` in a semver major version. + nodeModules: enabled, + generatedCode: enabled, + }); } function initializeFrozenIntrinsics() { diff --git a/lib/internal/source_map/source_map_cache.js b/lib/internal/source_map/source_map_cache.js index aaca27136e66a0..bdef338e3dd086 100644 --- a/lib/internal/source_map/source_map_cache.js +++ b/lib/internal/source_map/source_map_cache.js @@ -3,6 +3,7 @@ const { ArrayPrototypePush, JSONParse, + ObjectFreeze, RegExpPrototypeExec, SafeMap, StringPrototypeCodePointAt, @@ -15,7 +16,7 @@ let debug = require('internal/util/debuglog').debuglog('source_map', (fn) => { debug = fn; }); -const { validateBoolean } = require('internal/validators'); +const { validateBoolean, validateObject } = require('internal/validators'); const { setSourceMapsEnabled: setSourceMapsNative, } = internalBinding('errors'); @@ -23,7 +24,7 @@ const { defaultPrepareStackTrace, setInternalPrepareStackTrace, } = require('internal/errors'); -const { getLazy } = require('internal/util'); +const { getLazy, isUnderNodeModules, kEmptyObject } = require('internal/util'); const getModuleSourceMapCache = getLazy(() => { const { SourceMapCacheMap } = require('internal/source_map/source_map_cache_map'); @@ -45,30 +46,48 @@ const { fileURLToPath, pathToFileURL, URL, URLParse } = require('internal/url'); let SourceMap; // This is configured with --enable-source-maps during pre-execution. -let sourceMapsEnabled = false; -function getSourceMapsEnabled() { - return sourceMapsEnabled; +let sourceMapsSupport = ObjectFreeze({ + __proto__: null, + enabled: false, + nodeModules: false, + generatedCode: false, +}); +function getSourceMapsSupport() { + // Return a read-only object. + return sourceMapsSupport; } /** * Enables or disables source maps programmatically. 
- * @param {boolean} val + * @param {boolean} enabled + * @param {object} options + * @param {boolean} [options.nodeModules] + * @param {boolean} [options.generatedCode] */ -function setSourceMapsEnabled(val) { - validateBoolean(val, 'val'); +function setSourceMapsSupport(enabled, options = kEmptyObject) { + validateBoolean(enabled, 'enabled'); + validateObject(options, 'options'); + + const { nodeModules = false, generatedCode = false } = options; + validateBoolean(nodeModules, 'options.nodeModules'); + validateBoolean(generatedCode, 'options.generatedCode'); - setSourceMapsNative(val); - if (val) { + setSourceMapsNative(enabled); + if (enabled) { const { prepareStackTraceWithSourceMaps, } = require('internal/source_map/prepare_stack_trace'); setInternalPrepareStackTrace(prepareStackTraceWithSourceMaps); - } else if (sourceMapsEnabled !== undefined) { - // Reset prepare stack trace callback only when disabling source maps. + } else { setInternalPrepareStackTrace(defaultPrepareStackTrace); } - sourceMapsEnabled = val; + sourceMapsSupport = ObjectFreeze({ + __proto__: null, + enabled, + nodeModules: nodeModules, + generatedCode: generatedCode, + }); } /** @@ -130,14 +149,18 @@ function extractSourceMapURLMagicComment(content) { * @param {string | undefined} sourceMapURL - the source map url */ function maybeCacheSourceMap(filename, content, moduleInstance, isGeneratedSource, sourceURL, sourceMapURL) { - const sourceMapsEnabled = getSourceMapsEnabled(); - if (!(process.env.NODE_V8_COVERAGE || sourceMapsEnabled)) return; + const support = getSourceMapsSupport(); + if (!(process.env.NODE_V8_COVERAGE || support.enabled)) return; const { normalizeReferrerURL } = require('internal/modules/helpers'); filename = normalizeReferrerURL(filename); if (filename === undefined) { // This is most likely an invalid filename in sourceURL of [eval]-wrapper. return; } + if (!support.nodeModules && isUnderNodeModules(filename)) { + // Skip file under node_modules if not enabled. 
+ return; + } if (sourceMapURL === undefined) { sourceMapURL = extractSourceMapURLMagicComment(content); @@ -185,8 +208,8 @@ function maybeCacheSourceMap(filename, content, moduleInstance, isGeneratedSourc * @param {string} content - the eval'd source code */ function maybeCacheGeneratedSourceMap(content) { - const sourceMapsEnabled = getSourceMapsEnabled(); - if (!(process.env.NODE_V8_COVERAGE || sourceMapsEnabled)) return; + const support = getSourceMapsSupport(); + if (!(process.env.NODE_V8_COVERAGE || support.enabled || support.generated)) return; const sourceURL = extractSourceURLMagicComment(content); if (sourceURL === null) { @@ -352,6 +375,10 @@ function findSourceMap(sourceURL) { return undefined; } + if (!getSourceMapsSupport().nodeModules && isUnderNodeModules(sourceURL)) { + return undefined; + } + SourceMap ??= require('internal/source_map/source_map').SourceMap; try { if (RegExpPrototypeExec(kLeadingProtocol, sourceURL) === null) { @@ -377,8 +404,8 @@ function findSourceMap(sourceURL) { module.exports = { findSourceMap, - getSourceMapsEnabled, - setSourceMapsEnabled, + getSourceMapsSupport, + setSourceMapsSupport, maybeCacheSourceMap, maybeCacheGeneratedSourceMap, sourceMapCacheToObject, diff --git a/lib/module.js b/lib/module.js index a0317d06e0edb0..1217172afb3ccb 100644 --- a/lib/module.js +++ b/lib/module.js @@ -1,9 +1,15 @@ 'use strict'; -const { findSourceMap } = require('internal/source_map/source_map_cache'); +const { + findSourceMap, + getSourceMapsSupport, + setSourceMapsSupport, +} = require('internal/source_map/source_map_cache'); const { Module } = require('internal/modules/cjs/loader'); const { register } = require('internal/modules/esm/loader'); -const { SourceMap } = require('internal/source_map/source_map'); +const { + SourceMap, +} = require('internal/source_map/source_map'); const { constants, enableCompileCache, @@ -15,9 +21,7 @@ const { } = require('internal/modules/package_json_reader'); const { stripTypeScriptTypes } = require('internal/modules/typescript'); -Module.findSourceMap = findSourceMap; Module.register = register; -Module.SourceMap = SourceMap; Module.constants = constants; Module.enableCompileCache = enableCompileCache; Module.findPackageJSON = findPackageJSON; @@ -25,4 +29,10 @@ Module.flushCompileCache = flushCompileCache; Module.getCompileCacheDir = getCompileCacheDir; Module.stripTypeScriptTypes = stripTypeScriptTypes; +// SourceMap APIs +Module.findSourceMap = findSourceMap; +Module.SourceMap = SourceMap; +Module.getSourceMapsSupport = getSourceMapsSupport; +Module.setSourceMapsSupport = setSourceMapsSupport; + module.exports = Module; diff --git a/test/fixtures/source-map/node_modules/error-stack/enclosing-call-site-min.js b/test/fixtures/source-map/node_modules/error-stack/enclosing-call-site-min.js new file mode 100644 index 00000000000000..45b7ed2b219b86 --- /dev/null +++ b/test/fixtures/source-map/node_modules/error-stack/enclosing-call-site-min.js @@ -0,0 +1,3 @@ +var functionA=function(){functionB()};function functionB(){functionC()}var functionC=function(){functionD()},functionD=function(){if(0 { + functionB() +} + +function functionB() { + functionC() +} + +const functionC = () => { + functionD() +} + +const functionD = () => { + (function functionE () { + if (Math.random() > 0) { + throw new Error('an error!') + } + })() +} + +const thrower = functionA + +try { + thrower() +} catch (err) { + throw err +} diff --git a/test/fixtures/source-map/node_modules/error-stack/enclosing-call-site.js.map 
b/test/fixtures/source-map/node_modules/error-stack/enclosing-call-site.js.map new file mode 100644 index 00000000000000..d0c785f26091cc --- /dev/null +++ b/test/fixtures/source-map/node_modules/error-stack/enclosing-call-site.js.map @@ -0,0 +1,8 @@ +{ +"version":3, +"file":"enclosing-call-site-min.js", +"lineCount":1, +"mappings":"AAAA,IAAMA,UAAYA,QAAA,EAAM,CACtBC,SAAA,EADsB,CAIxBA,SAASA,UAAS,EAAG,CACnBC,SAAA,EADmB,CAIrB,IAAMA,UAAYA,QAAA,EAAM,CACtBC,SAAA,EADsB,CAAxB,CAIMA,UAAYA,QAAA,EAAM,CAEpB,GAAoB,CAApB,CAAIC,IAAA,CAAKC,MAAL,EAAJ,CACE,KAAUC,MAAJ,CAAU,WAAV,CAAN,CAHkB,CAJxB,CAYMC,QAAUP,SAEhB,IAAI,CACFO,SAAA,EADE,CAEF,MAAOC,CAAP,CAAY,CACZ,KAAMA,EAAN,CADY;", +"sources":["enclosing-call-site.js"], +"names":["functionA","functionB","functionC","functionD","Math","random","Error","thrower","err"] +} diff --git a/test/fixtures/source-map/output/source_map_disabled_by_api.js b/test/fixtures/source-map/output/source_map_disabled_by_api.js index 8f455f26b6c9c4..1291f3583ac239 100644 --- a/test/fixtures/source-map/output/source_map_disabled_by_api.js +++ b/test/fixtures/source-map/output/source_map_disabled_by_api.js @@ -2,11 +2,23 @@ 'use strict'; require('../../../common'); -const assert = require('assert'); +const assert = require('node:assert'); +const Module = require('node:module'); Error.stackTraceLimit = 5; -assert.strictEqual(process.sourceMapsEnabled, true); -process.setSourceMapsEnabled(false); +assert.deepStrictEqual(Module.getSourceMapsSupport(), { + __proto__: null, + enabled: true, + nodeModules: true, + generatedCode: true, +}); +Module.setSourceMapsSupport(false); +assert.deepStrictEqual(Module.getSourceMapsSupport(), { + __proto__: null, + enabled: false, + nodeModules: false, + generatedCode: false, +}); assert.strictEqual(process.sourceMapsEnabled, false); try { @@ -19,7 +31,13 @@ try { // support enabled programmatically. delete require.cache[require .resolve('../enclosing-call-site-min.js')]; -process.setSourceMapsEnabled(true); +Module.setSourceMapsSupport(true); +assert.deepStrictEqual(Module.getSourceMapsSupport(), { + __proto__: null, + enabled: true, + nodeModules: false, + generatedCode: false, +}); assert.strictEqual(process.sourceMapsEnabled, true); try { diff --git a/test/fixtures/source-map/output/source_map_disabled_by_process_api.js b/test/fixtures/source-map/output/source_map_disabled_by_process_api.js new file mode 100644 index 00000000000000..f9fc5b0c966ca6 --- /dev/null +++ b/test/fixtures/source-map/output/source_map_disabled_by_process_api.js @@ -0,0 +1,42 @@ +// Flags: --enable-source-maps + +'use strict'; +require('../../../common'); +const assert = require('node:assert'); +const Module = require('node:module'); +Error.stackTraceLimit = 5; + +assert.strictEqual(process.sourceMapsEnabled, true); +process.setSourceMapsEnabled(false); +assert.strictEqual(process.sourceMapsEnabled, false); +assert.deepStrictEqual(Module.getSourceMapsSupport(), { + __proto__: null, + enabled: false, + nodeModules: false, + generatedCode: false, +}); + +try { + require('../enclosing-call-site-min.js'); +} catch (e) { + console.log(e); +} + +// Delete the CJS module cache and loading the module again with source maps +// support enabled programmatically. 
+delete require.cache[require + .resolve('../enclosing-call-site-min.js')]; +process.setSourceMapsEnabled(true); +assert.strictEqual(process.sourceMapsEnabled, true); +assert.deepStrictEqual(Module.getSourceMapsSupport(), { + __proto__: null, + enabled: true, + nodeModules: true, + generatedCode: true, +}); + +try { + require('../enclosing-call-site-min.js'); +} catch (e) { + console.log(e); +} diff --git a/test/fixtures/source-map/output/source_map_disabled_by_process_api.snapshot b/test/fixtures/source-map/output/source_map_disabled_by_process_api.snapshot new file mode 100644 index 00000000000000..655cd6695e1116 --- /dev/null +++ b/test/fixtures/source-map/output/source_map_disabled_by_process_api.snapshot @@ -0,0 +1,12 @@ +Error: an error! + at functionD (*enclosing-call-site-min.js:1:156) + at functionC (*enclosing-call-site-min.js:1:97) + at functionB (*enclosing-call-site-min.js:1:60) + at functionA (*enclosing-call-site-min.js:1:26) + at Object. (*enclosing-call-site-min.js:1:199) +Error: an error! + at functionD (*enclosing-call-site.js:16:17) + at functionC (*enclosing-call-site.js:10:3) + at functionB (*enclosing-call-site.js:6:3) + at functionA (*enclosing-call-site.js:2:3) + at Object. (*enclosing-call-site.js:24:3) diff --git a/test/fixtures/source-map/output/source_map_enabled_by_api.js b/test/fixtures/source-map/output/source_map_enabled_by_api.js index 1dd4f9530c68db..e09e05b59339f4 100644 --- a/test/fixtures/source-map/output/source_map_enabled_by_api.js +++ b/test/fixtures/source-map/output/source_map_enabled_by_api.js @@ -1,10 +1,22 @@ 'use strict'; require('../../../common'); -const assert = require('assert'); +const assert = require('node:assert'); +const Module = require('node:module'); Error.stackTraceLimit = 5; -assert.strictEqual(process.sourceMapsEnabled, false); -process.setSourceMapsEnabled(true); +assert.deepStrictEqual(Module.getSourceMapsSupport(), { + __proto__: null, + enabled: false, + nodeModules: false, + generatedCode: false, +}); +Module.setSourceMapsSupport(true); +assert.deepStrictEqual(Module.getSourceMapsSupport(), { + __proto__: null, + enabled: true, + nodeModules: false, + generatedCode: false, +}); assert.strictEqual(process.sourceMapsEnabled, true); try { @@ -16,7 +28,13 @@ try { delete require.cache[require .resolve('../enclosing-call-site-min.js')]; -process.setSourceMapsEnabled(false); +Module.setSourceMapsSupport(false); +assert.deepStrictEqual(Module.getSourceMapsSupport(), { + __proto__: null, + enabled: false, + nodeModules: false, + generatedCode: false, +}); assert.strictEqual(process.sourceMapsEnabled, false); try { diff --git a/test/fixtures/source-map/output/source_map_enabled_by_api_node_modules.js b/test/fixtures/source-map/output/source_map_enabled_by_api_node_modules.js new file mode 100644 index 00000000000000..5de2f3b0d7eb85 --- /dev/null +++ b/test/fixtures/source-map/output/source_map_enabled_by_api_node_modules.js @@ -0,0 +1,48 @@ +'use strict'; +require('../../../common'); +const assert = require('node:assert'); +const Module = require('node:module'); +Error.stackTraceLimit = 5; + +assert.deepStrictEqual(Module.getSourceMapsSupport(), { + __proto__: null, + enabled: false, + nodeModules: false, + generatedCode: false, +}); +Module.setSourceMapsSupport(true, { + nodeModules: true, +}); +assert.deepStrictEqual(Module.getSourceMapsSupport(), { + __proto__: null, + enabled: true, + nodeModules: true, + generatedCode: false, +}); +assert.strictEqual(process.sourceMapsEnabled, true); + +try { + 
require('../node_modules/error-stack/enclosing-call-site-min.js').simpleErrorStack(); +} catch (e) { + console.log(e); +} + +delete require.cache[require + .resolve('../node_modules/error-stack/enclosing-call-site-min.js')]; + +Module.setSourceMapsSupport(true, { + nodeModules: false, +}); +assert.deepStrictEqual(Module.getSourceMapsSupport(), { + __proto__: null, + enabled: true, + nodeModules: false, + generatedCode: false, +}); +assert.strictEqual(process.sourceMapsEnabled, true); + +try { + require('../node_modules/error-stack/enclosing-call-site-min.js').simpleErrorStack(); +} catch (e) { + console.log(e); +} \ No newline at end of file diff --git a/test/fixtures/source-map/output/source_map_enabled_by_api_node_modules.snapshot b/test/fixtures/source-map/output/source_map_enabled_by_api_node_modules.snapshot new file mode 100644 index 00000000000000..f46c21dbe42057 --- /dev/null +++ b/test/fixtures/source-map/output/source_map_enabled_by_api_node_modules.snapshot @@ -0,0 +1,12 @@ +Error: an error! + at functionD (*node_modules*error-stack*enclosing-call-site.js:16:17) + at functionC (*node_modules*error-stack*enclosing-call-site.js:10:3) + at functionB (*node_modules*error-stack*enclosing-call-site.js:6:3) + at functionA (*node_modules*error-stack*enclosing-call-site.js:2:3) + at Object. (*node_modules*error-stack*enclosing-call-site.js:24:3) +Error: an error! + at functionD (*node_modules*error-stack*enclosing-call-site-min.js:1:156) + at functionC (*node_modules*error-stack*enclosing-call-site-min.js:1:97) + at functionB (*node_modules*error-stack*enclosing-call-site-min.js:1:60) + at functionA (*node_modules*error-stack*enclosing-call-site-min.js:1:26) + at Object. (*node_modules*error-stack*enclosing-call-site-min.js:1:199) diff --git a/test/fixtures/source-map/output/source_map_enabled_by_process_api.js b/test/fixtures/source-map/output/source_map_enabled_by_process_api.js new file mode 100644 index 00000000000000..867a5cc082d40b --- /dev/null +++ b/test/fixtures/source-map/output/source_map_enabled_by_process_api.js @@ -0,0 +1,39 @@ +'use strict'; +require('../../../common'); +const assert = require('node:assert'); +const Module = require('node:module'); +Error.stackTraceLimit = 5; + +assert.strictEqual(process.sourceMapsEnabled, false); +process.setSourceMapsEnabled(true); +assert.strictEqual(process.sourceMapsEnabled, true); +assert.deepStrictEqual(Module.getSourceMapsSupport(), { + __proto__: null, + enabled: true, + nodeModules: true, + generatedCode: true, +}); + +try { + require('../enclosing-call-site-min.js'); +} catch (e) { + console.log(e); +} + +delete require.cache[require + .resolve('../enclosing-call-site-min.js')]; + +process.setSourceMapsEnabled(false); +assert.strictEqual(process.sourceMapsEnabled, false); +assert.deepStrictEqual(Module.getSourceMapsSupport(), { + __proto__: null, + enabled: false, + nodeModules: false, + generatedCode: false, +}); + +try { + require('../enclosing-call-site-min.js'); +} catch (e) { + console.log(e); +} diff --git a/test/fixtures/source-map/output/source_map_enabled_by_process_api.snapshot b/test/fixtures/source-map/output/source_map_enabled_by_process_api.snapshot new file mode 100644 index 00000000000000..082b3f310ed4f9 --- /dev/null +++ b/test/fixtures/source-map/output/source_map_enabled_by_process_api.snapshot @@ -0,0 +1,12 @@ +Error: an error! 
+ at functionD (*enclosing-call-site.js:16:17) + at functionC (*enclosing-call-site.js:10:3) + at functionB (*enclosing-call-site.js:6:3) + at functionA (*enclosing-call-site.js:2:3) + at Object. (*enclosing-call-site.js:24:3) +Error: an error! + at functionD (*enclosing-call-site-min.js:1:156) + at functionC (*enclosing-call-site-min.js:1:97) + at functionB (*enclosing-call-site-min.js:1:60) + at functionA (*enclosing-call-site-min.js:1:26) + at Object. (*enclosing-call-site-min.js:1:199) diff --git a/test/fixtures/source-map/output/source_map_prepare_stack_trace.js b/test/fixtures/source-map/output/source_map_prepare_stack_trace.js index 1b04e0a3ac221b..894aea60a96f18 100644 --- a/test/fixtures/source-map/output/source_map_prepare_stack_trace.js +++ b/test/fixtures/source-map/output/source_map_prepare_stack_trace.js @@ -2,7 +2,8 @@ 'use strict'; require('../../../common'); -const assert = require('assert'); +const assert = require('node:assert'); +const Module = require('node:module'); Error.stackTraceLimit = 5; assert.strictEqual(typeof Error.prepareStackTrace, 'function'); @@ -22,8 +23,13 @@ try { // Source maps support is disabled programmatically even without deleting the // CJS module cache. -process.setSourceMapsEnabled(false); -assert.strictEqual(process.sourceMapsEnabled, false); +Module.setSourceMapsSupport(false); +assert.deepStrictEqual(Module.getSourceMapsSupport(), { + __proto__: null, + enabled: false, + nodeModules: false, + generatedCode: false, +}); try { require('../enclosing-call-site-min.js'); diff --git a/test/parallel/test-module-setsourcemapssupport.js b/test/parallel/test-module-setsourcemapssupport.js new file mode 100644 index 00000000000000..ea3e396a5c5960 --- /dev/null +++ b/test/parallel/test-module-setsourcemapssupport.js @@ -0,0 +1,43 @@ +'use strict'; +require('../common'); +const assert = require('node:assert'); +const Module = require('node:module'); + +// This test verifies that the `Module.setSourceMapsSupport` throws on invalid +// argument inputs. 
+ +{ + const unexpectedValues = [ + undefined, + null, + 1, + {}, + () => {}, + ]; + for (const it of unexpectedValues) { + assert.throws(() => { + Module.setSourceMapsSupport(it); + }, /ERR_INVALID_ARG_TYPE/); + } +} + +{ + const unexpectedValues = [ + null, + 1, + {}, + () => {}, + ]; + for (const it of unexpectedValues) { + assert.throws(() => { + Module.setSourceMapsSupport(true, { + nodeModules: it, + }); + }, /ERR_INVALID_ARG_TYPE/); + assert.throws(() => { + Module.setSourceMapsSupport(true, { + generatedCode: it, + }); + }, /ERR_INVALID_ARG_TYPE/); + } +} diff --git a/test/parallel/test-node-output-sourcemaps.mjs b/test/parallel/test-node-output-sourcemaps.mjs index e9104db220867f..29cc5eb711f176 100644 --- a/test/parallel/test-node-output-sourcemaps.mjs +++ b/test/parallel/test-node-output-sourcemaps.mjs @@ -27,7 +27,10 @@ describe('sourcemaps output', { concurrency: !process.env.TEST_PARALLEL }, () => const tests = [ { name: 'source-map/output/source_map_disabled_by_api.js' }, + { name: 'source-map/output/source_map_disabled_by_process_api.js' }, { name: 'source-map/output/source_map_enabled_by_api.js' }, + { name: 'source-map/output/source_map_enabled_by_api_node_modules.js' }, + { name: 'source-map/output/source_map_enabled_by_process_api.js' }, { name: 'source-map/output/source_map_enclosing_function.js' }, { name: 'source-map/output/source_map_eval.js' }, { name: 'source-map/output/source_map_no_source_file.js' }, From 2b6a82dcea84504a5b2bbb126206782be80cc3f8 Mon Sep 17 00:00:00 2001 From: James M Snell Date: Sun, 19 Jan 2025 07:41:13 -0800 Subject: [PATCH 118/158] src: replace NoArrayBufferZeroFillScope with v8 option MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit NoArrayBufferZeroFillScope was added before the v8 option to create uninitialized backing stores was added. We can start moving away from it. 
PR-URL: https://github.com/nodejs/node/pull/56658 Reviewed-By: Yagiz Nizipli Reviewed-By: Michaël Zasso Reviewed-By: Chengzhong Wu Reviewed-By: Rafael Gonzaga Reviewed-By: Matteo Collina --- src/encoding_binding.cc | 6 +++--- src/env.cc | 14 +++++++++----- src/node_buffer.cc | 20 ++++++++------------ src/node_http2.cc | 32 ++++++++++++++------------------ src/stream_base.cc | 13 +++++++------ 5 files changed, 41 insertions(+), 44 deletions(-) diff --git a/src/encoding_binding.cc b/src/encoding_binding.cc index 885a0d072312e9..0438afe6efd8b6 100644 --- a/src/encoding_binding.cc +++ b/src/encoding_binding.cc @@ -15,6 +15,7 @@ namespace encoding_binding { using v8::ArrayBuffer; using v8::BackingStore; +using v8::BackingStoreInitializationMode; using v8::Context; using v8::FunctionCallbackInfo; using v8::Isolate; @@ -124,9 +125,8 @@ void BindingData::EncodeUtf8String(const FunctionCallbackInfo& args) { Local ab; { - NoArrayBufferZeroFillScope no_zero_fill_scope(env->isolate_data()); - std::unique_ptr bs = - ArrayBuffer::NewBackingStore(isolate, length); + std::unique_ptr bs = ArrayBuffer::NewBackingStore( + isolate, length, BackingStoreInitializationMode::kUninitialized); CHECK(bs); diff --git a/src/env.cc b/src/env.cc index 0eda889802710d..cd7203ffda6e7c 100644 --- a/src/env.cc +++ b/src/env.cc @@ -39,6 +39,9 @@ namespace node { using errors::TryCatchScope; using v8::Array; +using v8::ArrayBuffer; +using v8::BackingStore; +using v8::BackingStoreInitializationMode; using v8::Boolean; using v8::Context; using v8::CppHeap; @@ -742,17 +745,18 @@ void Environment::add_refs(int64_t diff) { } uv_buf_t Environment::allocate_managed_buffer(const size_t suggested_size) { - NoArrayBufferZeroFillScope no_zero_fill_scope(isolate_data()); - std::unique_ptr bs = - v8::ArrayBuffer::NewBackingStore(isolate(), suggested_size); + std::unique_ptr bs = ArrayBuffer::NewBackingStore( + isolate(), + suggested_size, + BackingStoreInitializationMode::kUninitialized); uv_buf_t buf = uv_buf_init(static_cast(bs->Data()), bs->ByteLength()); released_allocated_buffers_.emplace(buf.base, std::move(bs)); return buf; } -std::unique_ptr Environment::release_managed_buffer( +std::unique_ptr Environment::release_managed_buffer( const uv_buf_t& buf) { - std::unique_ptr bs; + std::unique_ptr bs; if (buf.base != nullptr) { auto it = released_allocated_buffers_.find(buf.base); CHECK_NE(it, released_allocated_buffers_.end()); diff --git a/src/node_buffer.cc b/src/node_buffer.cc index 2e0e8d4746fb61..e8eae4eff51144 100644 --- a/src/node_buffer.cc +++ b/src/node_buffer.cc @@ -58,6 +58,7 @@ namespace Buffer { using v8::ArrayBuffer; using v8::ArrayBufferView; using v8::BackingStore; +using v8::BackingStoreInitializationMode; using v8::Context; using v8::EscapableHandleScope; using v8::FastApiTypedArray; @@ -372,9 +373,8 @@ MaybeLocal New(Environment* env, size_t length) { Local ab; { - NoArrayBufferZeroFillScope no_zero_fill_scope(env->isolate_data()); - std::unique_ptr bs = - ArrayBuffer::NewBackingStore(isolate, length); + std::unique_ptr bs = ArrayBuffer::NewBackingStore( + isolate, length, BackingStoreInitializationMode::kUninitialized); CHECK(bs); @@ -413,18 +413,14 @@ MaybeLocal Copy(Environment* env, const char* data, size_t length) { return Local(); } - Local ab; - { - NoArrayBufferZeroFillScope no_zero_fill_scope(env->isolate_data()); - std::unique_ptr bs = - ArrayBuffer::NewBackingStore(isolate, length); + std::unique_ptr bs = ArrayBuffer::NewBackingStore( + isolate, length, 
BackingStoreInitializationMode::kUninitialized); - CHECK(bs); + CHECK(bs); - memcpy(bs->Data(), data, length); + memcpy(bs->Data(), data, length); - ab = ArrayBuffer::New(isolate, std::move(bs)); - } + Local ab = ArrayBuffer::New(isolate, std::move(bs)); MaybeLocal obj = New(env, ab, 0, ab->ByteLength()) diff --git a/src/node_http2.cc b/src/node_http2.cc index b23f4080a6d4e4..38b3046861e805 100644 --- a/src/node_http2.cc +++ b/src/node_http2.cc @@ -27,6 +27,7 @@ using v8::Array; using v8::ArrayBuffer; using v8::ArrayBufferView; using v8::BackingStore; +using v8::BackingStoreInitializationMode; using v8::Boolean; using v8::Context; using v8::EscapableHandleScope; @@ -292,11 +293,10 @@ Local Http2Settings::Pack( size_t count, const nghttp2_settings_entry* entries) { EscapableHandleScope scope(env->isolate()); - std::unique_ptr bs; - { - NoArrayBufferZeroFillScope no_zero_fill_scope(env->isolate_data()); - bs = ArrayBuffer::NewBackingStore(env->isolate(), count * 6); - } + std::unique_ptr bs = ArrayBuffer::NewBackingStore( + env->isolate(), + count * 6, + BackingStoreInitializationMode::kUninitialized); if (nghttp2_pack_settings_payload(static_cast(bs->Data()), bs->ByteLength(), entries, @@ -457,13 +457,11 @@ Origins::Origins( return; } - { - NoArrayBufferZeroFillScope no_zero_fill_scope(env->isolate_data()); - bs_ = ArrayBuffer::NewBackingStore(env->isolate(), - alignof(nghttp2_origin_entry) - 1 + - count_ * sizeof(nghttp2_origin_entry) + - origin_string_len); - } + bs_ = ArrayBuffer::NewBackingStore( + env->isolate(), + alignof(nghttp2_origin_entry) - 1 + + count_ * sizeof(nghttp2_origin_entry) + origin_string_len, + BackingStoreInitializationMode::kUninitialized); // Make sure the start address is aligned appropriately for an nghttp2_nv*. char* start = nbytes::AlignUp(static_cast(bs_->Data()), @@ -2090,12 +2088,10 @@ void Http2Session::OnStreamRead(ssize_t nread, const uv_buf_t& buf_) { // happen, we concatenate the data we received with the already-stored // pending input data, slicing off the already processed part. 
size_t pending_len = stream_buf_.len - stream_buf_offset_; - std::unique_ptr new_bs; - { - NoArrayBufferZeroFillScope no_zero_fill_scope(env()->isolate_data()); - new_bs = ArrayBuffer::NewBackingStore(env()->isolate(), - pending_len + nread); - } + std::unique_ptr new_bs = ArrayBuffer::NewBackingStore( + env()->isolate(), + pending_len + nread, + BackingStoreInitializationMode::kUninitialized); memcpy(static_cast(new_bs->Data()), stream_buf_.base + stream_buf_offset_, pending_len); diff --git a/src/stream_base.cc b/src/stream_base.cc index 9d855c2992492d..518e723272dcbc 100644 --- a/src/stream_base.cc +++ b/src/stream_base.cc @@ -19,6 +19,7 @@ namespace node { using v8::Array; using v8::ArrayBuffer; using v8::BackingStore; +using v8::BackingStoreInitializationMode; using v8::ConstructorBehavior; using v8::Context; using v8::DontDelete; @@ -243,8 +244,8 @@ int StreamBase::Writev(const FunctionCallbackInfo& args) { std::unique_ptr bs; if (storage_size > 0) { - NoArrayBufferZeroFillScope no_zero_fill_scope(env->isolate_data()); - bs = ArrayBuffer::NewBackingStore(isolate, storage_size); + bs = ArrayBuffer::NewBackingStore( + isolate, storage_size, BackingStoreInitializationMode::kUninitialized); } offset = 0; @@ -398,14 +399,14 @@ int StreamBase::WriteString(const FunctionCallbackInfo& args) { if (try_write) { // Copy partial data - NoArrayBufferZeroFillScope no_zero_fill_scope(env->isolate_data()); - bs = ArrayBuffer::NewBackingStore(isolate, buf.len); + bs = ArrayBuffer::NewBackingStore( + isolate, buf.len, BackingStoreInitializationMode::kUninitialized); memcpy(static_cast(bs->Data()), buf.base, buf.len); data_size = buf.len; } else { // Write it - NoArrayBufferZeroFillScope no_zero_fill_scope(env->isolate_data()); - bs = ArrayBuffer::NewBackingStore(isolate, storage_size); + bs = ArrayBuffer::NewBackingStore( + isolate, storage_size, BackingStoreInitializationMode::kUninitialized); data_size = StringBytes::Write(isolate, static_cast(bs->Data()), storage_size, From 2a219eddf6ef65371759090689eccc9e2744197d Mon Sep 17 00:00:00 2001 From: Rich Trott Date: Thu, 23 Jan 2025 20:38:49 -0800 Subject: [PATCH 119/158] test: enforce strict mode in test-zlib-const MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of checking that assignments fail silently in sloppy mode, check that they throw in strict mode. 
PR-URL: https://github.com/nodejs/node/pull/56689 Reviewed-By: Michaël Zasso Reviewed-By: Luigi Pinca Reviewed-By: James M Snell --- test/parallel/test-zlib-const.js | 28 +++++++++------------------- 1 file changed, 9 insertions(+), 19 deletions(-) diff --git a/test/parallel/test-zlib-const.js b/test/parallel/test-zlib-const.js index 342c8c712a475b..5b9a127f0eaa02 100644 --- a/test/parallel/test-zlib-const.js +++ b/test/parallel/test-zlib-const.js @@ -1,4 +1,4 @@ -/* eslint-disable strict */ +'use strict'; require('../common'); const assert = require('assert'); @@ -9,27 +9,17 @@ assert.strictEqual(zlib.constants.Z_OK, 0, 'Expected Z_OK to be 0;', `got ${zlib.constants.Z_OK}`, ].join(' ')); -zlib.constants.Z_OK = 1; -assert.strictEqual(zlib.constants.Z_OK, 0, - [ - 'Z_OK should be immutable.', - `Expected to get 0, got ${zlib.constants.Z_OK}`, - ].join(' ')); + +assert.throws(() => { zlib.constants.Z_OK = 1; }, + TypeError, 'zlib.constants.Z_OK should be immutable'); assert.strictEqual(zlib.codes.Z_OK, 0, `Expected Z_OK to be 0; got ${zlib.codes.Z_OK}`); -zlib.codes.Z_OK = 1; -assert.strictEqual(zlib.codes.Z_OK, 0, - [ - 'Z_OK should be immutable.', - `Expected to get 0, got ${zlib.codes.Z_OK}`, - ].join(' ')); -zlib.codes = { Z_OK: 1 }; -assert.strictEqual(zlib.codes.Z_OK, 0, - [ - 'Z_OK should be immutable.', - `Expected to get 0, got ${zlib.codes.Z_OK}`, - ].join(' ')); +assert.throws(() => { zlib.codes.Z_OK = 1; }, + TypeError, 'zlib.codes.Z_OK should be immutable'); + +assert.throws(() => { zlib.codes = { Z_OK: 1 }; }, + TypeError, 'zlib.codes should be immutable'); assert.ok(Object.isFrozen(zlib.codes), [ From 649cf0c0f6f62d521a4684c0874dfdbe5b516db0 Mon Sep 17 00:00:00 2001 From: Antoine du Hamel Date: Fri, 24 Jan 2025 15:22:12 +0100 Subject: [PATCH 120/158] tools: do not throw on missing `create-release-proposal.sh` PR-URL: https://github.com/nodejs/node/pull/56704 Reviewed-By: James M Snell Reviewed-By: Tierney Cyren Reviewed-By: Rafael Gonzaga --- .github/workflows/create-release-proposal.yml | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/.github/workflows/create-release-proposal.yml b/.github/workflows/create-release-proposal.yml index 33426bdcda4a5b..0b580eab81ac76 100644 --- a/.github/workflows/create-release-proposal.yml +++ b/.github/workflows/create-release-proposal.yml @@ -71,13 +71,10 @@ jobs: git config --local user.name "Node.js GitHub Bot" - name: Start git node release prepare - # `git update-index` tells git to ignore future changes to the `.sh` file, - # `|| true` is there to ignore the error if such file doesn't exist yet. # The curl command is to make sure we run the version of the script corresponding to the current workflow. run: | - git update-index --assume-unchanged tools/actions/create-release-proposal.sh || true - curl -fsSLo tools/actions/create-release-proposal.sh https://github.com/${GITHUB_REPOSITORY}/raw/${GITHUB_SHA}/tools/actions/create-release-proposal.sh - ./tools/actions/create-release-proposal.sh "${RELEASE_DATE}" "${RELEASE_LINE}" "${GITHUB_ACTOR}" + curl -fsSL https://github.com/${GITHUB_REPOSITORY}/raw/${GITHUB_SHA}/tools/actions/create-release-proposal.sh |\ + sh -s -- "${RELEASE_DATE}" "${RELEASE_LINE}" "${GITHUB_ACTOR}" env: GH_TOKEN: ${{ github.token }} # We want the bot to push the push the release commit so CI runs on it. 
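Side note (illustration only, not part of any patch in this series): the strict-mode rewrite in [PATCH 119/158] above relies on standard JavaScript semantics. In sloppy mode an assignment to a non-writable property of a frozen object fails silently, while in strict mode it throws a TypeError, which is why the updated test asserts that the assignments throw instead of re-reading the values afterwards. A minimal standalone sketch of that behaviour; the frozen object here is only a stand-in for zlib.constants, which the test itself shows is frozen:

'use strict';
// Object.freeze() makes Z_OK non-writable, mirroring zlib.constants.
const constants = Object.freeze({ Z_OK: 0 });
try {
  constants.Z_OK = 1; // throws in strict mode; silently ignored in sloppy mode
} catch (err) {
  console.log(err instanceof TypeError); // true
}
console.log(constants.Z_OK); // still 0 in either mode
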
From a9d332a16fc7a87fec4ea7724b7e1bd4370fbbe4 Mon Sep 17 00:00:00 2001 From: Jordan Harband Date: Sun, 12 Jan 2025 13:04:40 -0800 Subject: [PATCH 121/158] util: inspect: do not crash on an Error stack that contains a Symbol See #56570 PR-URL: https://github.com/nodejs/node/pull/56573 Reviewed-By: James M Snell Reviewed-By: Luigi Pinca Reviewed-By: Jason Zhang Reviewed-By: Ruben Bridgewater --- lib/internal/util/inspect.js | 14 ++++++++++---- test/parallel/test-util-inspect.js | 20 ++++++++++++++++---- 2 files changed, 26 insertions(+), 8 deletions(-) diff --git a/lib/internal/util/inspect.js b/lib/internal/util/inspect.js index 6d0496b151b0fe..091d3a53f10c10 100644 --- a/lib/internal/util/inspect.js +++ b/lib/internal/util/inspect.js @@ -1287,8 +1287,14 @@ function identicalSequenceRange(a, b) { return { len: 0, offset: 0 }; } -function getStackString(error) { - return error.stack ? String(error.stack) : ErrorPrototypeToString(error); +function getStackString(ctx, error) { + if (error.stack) { + if (typeof error.stack === 'string') { + return error.stack; + } + return formatValue(ctx, error.stack); + } + return ErrorPrototypeToString(error); } function getStackFrames(ctx, err, stack) { @@ -1303,7 +1309,7 @@ function getStackFrames(ctx, err, stack) { // Remove stack frames identical to frames in cause. if (cause != null && isError(cause)) { - const causeStack = getStackString(cause); + const causeStack = getStackString(ctx, cause); const causeStackStart = StringPrototypeIndexOf(causeStack, '\n at'); if (causeStackStart !== -1) { const causeFrames = StringPrototypeSplit(StringPrototypeSlice(causeStack, causeStackStart + 1), '\n'); @@ -1426,7 +1432,7 @@ function safeGetCWD() { function formatError(err, constructor, tag, ctx, keys) { const name = err.name != null ? 
err.name : 'Error'; - let stack = getStackString(err); + let stack = getStackString(ctx, err); removeDuplicateErrorKeys(ctx, keys, err, stack); diff --git a/test/parallel/test-util-inspect.js b/test/parallel/test-util-inspect.js index b3b08a82d5b7b0..e84d80073bb3ad 100644 --- a/test/parallel/test-util-inspect.js +++ b/test/parallel/test-util-inspect.js @@ -777,16 +777,18 @@ assert.strictEqual(util.inspect(-5e-324), '-5e-324'); [undefined, 'RangeError: foo', '[RangeError: foo]'], [false, 'false [RangeError]: foo', '[RangeError: foo]'], ['', 'foo', '[RangeError: foo]'], - [[1, 2, 3], '1,2,3 [RangeError]: foo', '[1,2,3]'], + [[1, 2, 3], '1,2,3 [RangeError]: foo', '[[\n 1,\n 2,\n 3\n]]'], ].forEach(([value, outputStart, stack]) => { let err = new RangeError('foo'); err.name = value; + const result = util.inspect(err); assert( - util.inspect(err).startsWith(outputStart), + result.startsWith(outputStart), util.format( - 'The name set to %o did not result in the expected output "%s"', + 'The name set to %o did not result in the expected output "%s", got "%s"', value, - outputStart + outputStart, + result.split('\n')[0] ) ); @@ -3416,3 +3418,13 @@ assert.strictEqual( ${error.stack.split('\n').slice(1).join('\n')}`, ); } + +{ + const error = new Error(); + error.stack = [Symbol('foo')]; + + assert.strictEqual( + inspect(error), + '[[\n Symbol(foo)\n]]' + ); +} From c7a132229f31eade2204f921207222bcdc57d29f Mon Sep 17 00:00:00 2001 From: Jonas Date: Fri, 24 Jan 2025 11:22:54 -0500 Subject: [PATCH 122/158] test: add missing test for env file PR-URL: https://github.com/nodejs/node/pull/56642 Reviewed-By: Yagiz Nizipli Reviewed-By: Luigi Pinca Reviewed-By: Jake Yuesong Li Reviewed-By: Matteo Collina Reviewed-By: James M Snell --- test/parallel/test-dotenv-edge-cases.js | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/test/parallel/test-dotenv-edge-cases.js b/test/parallel/test-dotenv-edge-cases.js index 769d33a13b8ce9..926c8d0793ac8b 100644 --- a/test/parallel/test-dotenv-edge-cases.js +++ b/test/parallel/test-dotenv-edge-cases.js @@ -170,4 +170,16 @@ describe('.env supports edge cases', () => { assert.strictEqual(SingleQuotesChild.stderr, ''); assert.strictEqual(SingleQuotesChild.code, 0); }); + + it('should reject invalid env file flag', async () => { + const child = await common.spawnPromisified( + process.execPath, + ['--env-file-ABCD', validEnvFilePath], + { cwd: __dirname }, + ); + + assert.strictEqual(child.stdout, ''); + assert.strictEqual(child.code, 9); + assert.match(child.stderr, /bad option: --env-file-ABCD/); + }); }); From 6edf04ee5efb92658f77832bbb39185b9dea3f43 Mon Sep 17 00:00:00 2001 From: James M Snell Date: Fri, 24 Jan 2025 16:58:32 -0800 Subject: [PATCH 123/158] test: move crypto related common utilities in common/crypto Since `common/crypto` already exists, it makes sense to keep crypto-related utilities there. The only exception being common.hasCrypto which is needed up front to determine if tests should be skipped. Eliminate the redundant check in hasFipsCrypto and just use crypto.getFips() directly where needed. 
PR-URL: https://github.com/nodejs/node/pull/56714 Reviewed-By: Yagiz Nizipli Reviewed-By: Luigi Pinca --- test/addons/openssl-providers/providers.cjs | 7 ++- test/benchmark/test-benchmark-crypto.js | 5 +- test/common/README.md | 17 ------ test/common/crypto.js | 52 ++++++++++++++++++- test/common/index.js | 52 ------------------- test/common/index.mjs | 2 - test/parallel/test-cli-node-options.js | 3 +- test/parallel/test-crypto-authenticated.js | 20 ++++--- .../test-crypto-cipheriv-decipheriv.js | 10 ++-- test/parallel/test-crypto-classes.js | 6 +-- test/parallel/test-crypto-dh-constructor.js | 3 +- test/parallel/test-crypto-dh-errors.js | 3 +- test/parallel/test-crypto-dh-generate-keys.js | 3 +- test/parallel/test-crypto-dh-leak.js | 3 +- test/parallel/test-crypto-dh-odd-key.js | 8 +-- test/parallel/test-crypto-dh-stateless.js | 7 +-- test/parallel/test-crypto-dh.js | 17 +++--- test/parallel/test-crypto-ecb.js | 13 +++-- test/parallel/test-crypto-fips.js | 5 +- test/parallel/test-crypto-hash.js | 7 +-- test/parallel/test-crypto-hkdf.js | 5 +- test/parallel/test-crypto-hmac.js | 5 +- test/parallel/test-crypto-key-objects.js | 10 ++-- ...test-crypto-keygen-async-dsa-key-object.js | 8 +-- test/parallel/test-crypto-keygen-async-dsa.js | 8 +-- ...-explicit-elliptic-curve-encrypted-p256.js | 4 +- ...nc-explicit-elliptic-curve-encrypted.js.js | 3 +- ...ync-named-elliptic-curve-encrypted-p256.js | 3 +- ...en-async-named-elliptic-curve-encrypted.js | 3 +- test/parallel/test-crypto-keygen-async-rsa.js | 3 +- .../parallel/test-crypto-keygen-bit-length.js | 3 +- ...rypto-keygen-empty-passphrase-no-prompt.js | 3 +- .../test-crypto-keygen-missing-oid.js | 4 +- test/parallel/test-crypto-keygen.js | 3 +- test/parallel/test-crypto-no-algorithm.js | 4 +- test/parallel/test-crypto-oneshot-hash.js | 3 +- test/parallel/test-crypto-padding.js | 5 +- test/parallel/test-crypto-pbkdf2.js | 3 +- .../test-crypto-private-decrypt-gh32240.js | 4 +- ...t-crypto-publicDecrypt-fails-first-time.js | 8 ++- test/parallel/test-crypto-rsa-dsa.js | 7 +-- test/parallel/test-crypto-secure-heap.js | 13 +++-- test/parallel/test-crypto-sign-verify.js | 13 +++-- test/parallel/test-crypto-stream.js | 8 +-- test/parallel/test-crypto-x509.js | 5 +- test/parallel/test-crypto.js | 7 +-- test/parallel/test-dsa-fips-invalid-key.js | 10 +++- .../test-https-agent-session-eviction.js | 9 ++-- .../test-https-client-renegotiation-limit.js | 8 ++- test/parallel/test-https-foafssl.js | 10 ++-- ...rocess-env-allowed-flags-are-documented.js | 7 +-- test/parallel/test-process-versions.js | 3 +- test/parallel/test-tls-alert-handling.js | 20 ++++--- test/parallel/test-tls-alert.js | 15 ++++-- test/parallel/test-tls-alpn-server-client.js | 5 +- test/parallel/test-tls-cert-ext-encoding.js | 4 +- test/parallel/test-tls-client-auth.js | 7 ++- .../test-tls-client-getephemeralkeyinfo.js | 3 +- test/parallel/test-tls-client-mindhsize.js | 5 +- .../test-tls-client-renegotiation-13.js | 8 ++- .../test-tls-client-renegotiation-limit.js | 8 ++- test/parallel/test-tls-dhe.js | 17 ++++-- test/parallel/test-tls-ecdh-auto.js | 10 ++-- test/parallel/test-tls-ecdh-multiple.js | 14 +++-- test/parallel/test-tls-ecdh.js | 10 ++-- test/parallel/test-tls-empty-sni-context.js | 4 +- test/parallel/test-tls-getprotocol.js | 6 ++- test/parallel/test-tls-junk-server.js | 7 ++- test/parallel/test-tls-key-mismatch.js | 6 ++- test/parallel/test-tls-legacy-pfx.js | 9 +++- test/parallel/test-tls-min-max-version.js | 16 ++++-- test/parallel/test-tls-no-sslv3.js | 10 
++-- test/parallel/test-tls-ocsp-callback.js | 15 ++++-- test/parallel/test-tls-psk-circuit.js | 10 ++-- test/parallel/test-tls-psk-server.js | 11 ++-- test/parallel/test-tls-securepair-server.js | 10 ++-- test/parallel/test-tls-server-verify.js | 10 ++-- test/parallel/test-tls-session-cache.js | 20 ++++--- test/parallel/test-tls-set-ciphers.js | 16 ++++-- test/parallel/test-tls-set-secure-context.js | 6 ++- test/parallel/test-tls-set-sigalgs.js | 7 ++- test/parallel/test-trace-env.js | 8 +-- test/parallel/test-x509-escaping.js | 5 +- test/pummel/test-crypto-dh-hash.js | 4 +- test/pummel/test-crypto-dh-keys.js | 3 +- test/pummel/test-dh-regr.js | 3 +- test/sequential/test-tls-psk-client.js | 11 ++-- test/sequential/test-tls-securepair-client.js | 25 +++++---- test/sequential/test-tls-session-timeout.js | 10 ++-- 89 files changed, 505 insertions(+), 288 deletions(-) diff --git a/test/addons/openssl-providers/providers.cjs b/test/addons/openssl-providers/providers.cjs index 2dabbf020e2a41..efa1019c62d99c 100644 --- a/test/addons/openssl-providers/providers.cjs +++ b/test/addons/openssl-providers/providers.cjs @@ -1,11 +1,14 @@ 'use strict'; const common = require('../../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} +const { hasOpenSSL3 } = require('../../common/crypto'); -if (!common.hasOpenSSL3) +if (!hasOpenSSL3) { common.skip('this test requires OpenSSL 3.x'); +} const assert = require('node:assert'); const { createHash, getCiphers, getHashes } = require('node:crypto'); const { debuglog } = require('node:util'); diff --git a/test/benchmark/test-benchmark-crypto.js b/test/benchmark/test-benchmark-crypto.js index 7f6988acf234d8..72d79ece13e787 100644 --- a/test/benchmark/test-benchmark-crypto.js +++ b/test/benchmark/test-benchmark-crypto.js @@ -5,8 +5,11 @@ const common = require('../common'); if (!common.hasCrypto) common.skip('missing crypto'); -if (common.hasFipsCrypto) +const { getFips } = require('crypto'); + +if (getFips()) { common.skip('some benchmarks are FIPS-incompatible'); +} const runBenchmark = require('../common/benchmark'); diff --git a/test/common/README.md b/test/common/README.md index 5f5ff75fca2431..ee36503f920001 100644 --- a/test/common/README.md +++ b/test/common/README.md @@ -226,17 +226,6 @@ The TTY file descriptor is assumed to be capable of being writable. Indicates whether OpenSSL is available. -### `hasFipsCrypto` - -* [\][] - -Indicates that Node.js has been linked with a FIPS compatible OpenSSL library, -and that FIPS as been enabled using `--enable-fips`. - -To only detect if the OpenSSL library is FIPS compatible, regardless if it has -been enabled or not, then `process.config.variables.openssl_is_fips` can be -used to determine that situation. - ### `hasIntl` * [\][] @@ -417,12 +406,6 @@ Returns `true` if the exit code `exitCode` and/or signal name `signal` represent the exit code and/or signal name of a node process that aborted, `false` otherwise. -### `opensslCli` - -* [\][] - -Indicates whether 'opensslCli' is supported. 
- ### `platformTimeout(ms)` - * `ms` [\<number>][] | [\<bigint>][] diff --git a/test/common/crypto.js index 10432d7e7a7e32..f50d3895a1783b 100644 --- a/test/common/crypto.js +++ b/test/common/crypto.js @@ -1,8 +1,9 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} const assert = require('assert'); const crypto = require('crypto'); @@ -98,6 +99,27 @@ const pkcs8EncExp = getRegExpForPEM('ENCRYPTED PRIVATE KEY'); const sec1Exp = getRegExpForPEM('EC PRIVATE KEY'); const sec1EncExp = (cipher) => getRegExpForPEM('EC PRIVATE KEY', cipher); +// Synthesize OPENSSL_VERSION_NUMBER format with the layout 0xMNN00PPSL +const opensslVersionNumber = (major = 0, minor = 0, patch = 0) => { + assert(major >= 0 && major <= 0xf); + assert(minor >= 0 && minor <= 0xff); + assert(patch >= 0 && patch <= 0xff); + return (major << 28) | (minor << 20) | (patch << 4); +}; + +let OPENSSL_VERSION_NUMBER; +const hasOpenSSL = (major = 0, minor = 0, patch = 0) => { + if (!common.hasCrypto) return false; + if (OPENSSL_VERSION_NUMBER === undefined) { + const regexp = /(?<m>\d+)\.(?<n>\d+)\.(?<p>\d+)/;
+ const { m, n, p } = process.versions.openssl.match(regexp).groups; + OPENSSL_VERSION_NUMBER = opensslVersionNumber(m, n, p); + } + return OPENSSL_VERSION_NUMBER >= opensslVersionNumber(major, minor, patch); +}; + +let opensslCli = null; + module.exports = { modp2buf, assertApproximateSize, @@ -111,4 +133,32 @@ module.exports = { pkcs8EncExp, // used once sec1Exp, sec1EncExp, + hasOpenSSL, + get hasOpenSSL3() { + return hasOpenSSL(3); + }, + // opensslCli defined lazily to reduce overhead of spawnSync + get opensslCli() { + if (opensslCli !== null) return opensslCli; + + if (process.config.variables.node_shared_openssl) { + // Use external command + opensslCli = 'openssl'; + } else { + const path = require('path'); + // Use command built from sources included in Node.js repository + opensslCli = path.join(path.dirname(process.execPath), 'openssl-cli'); + } + + if (exports.isWindows) opensslCli += '.exe'; + + const { spawnSync } = require('child_process'); + + const opensslCmd = spawnSync(opensslCli, ['version']); + if (opensslCmd.status !== 0 || opensslCmd.error !== undefined) { + // OpenSSL command cannot be executed + opensslCli = false; + } + return opensslCli; + }, }; diff --git a/test/common/index.js b/test/common/index.js index b5592a66a081c3..d2c39578324600 100644 --- a/test/common/index.js +++ b/test/common/index.js @@ -19,7 +19,6 @@ // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE // USE OR OTHER DEALINGS IN THE SOFTWARE. -/* eslint-disable node-core/crypto-check */ 'use strict'; const process = global.process; // Some tests tamper with the process global. @@ -57,25 +56,6 @@ const noop = () => {}; const hasCrypto = Boolean(process.versions.openssl) && !process.env.NODE_SKIP_CRYPTO; -// Synthesize OPENSSL_VERSION_NUMBER format with the layout 0xMNN00PPSL -const opensslVersionNumber = (major = 0, minor = 0, patch = 0) => { - assert(major >= 0 && major <= 0xf); - assert(minor >= 0 && minor <= 0xff); - assert(patch >= 0 && patch <= 0xff); - return (major << 28) | (minor << 20) | (patch << 4); -}; - -let OPENSSL_VERSION_NUMBER; -const hasOpenSSL = (major = 0, minor = 0, patch = 0) => { - if (!hasCrypto) return false; - if (OPENSSL_VERSION_NUMBER === undefined) { - const regexp = /(?<m>\d+)\.(?<n>\d+)\.(?<p>
\d+)/; - const { m, n, p } = process.versions.openssl.match(regexp).groups; - OPENSSL_VERSION_NUMBER = opensslVersionNumber(m, n, p); - } - return OPENSSL_VERSION_NUMBER >= opensslVersionNumber(major, minor, patch); -}; - const hasQuic = hasCrypto && !!process.config.variables.openssl_quic; function parseTestFlags(filename = process.argv[1]) { @@ -220,7 +200,6 @@ if (process.env.NODE_TEST_WITH_ASYNC_HOOKS) { }).enable(); } -let opensslCli = null; let inFreeBSDJail = null; let localhostIPv4 = null; @@ -985,7 +964,6 @@ const common = { getTTYfd, hasIntl, hasCrypto, - hasOpenSSL, hasQuic, hasMultiLocalhost, invalidArgTypeHelper, @@ -1027,10 +1005,6 @@ const common = { return require('os').totalmem() > 0x70000000; /* 1.75 Gb */ }, - get hasFipsCrypto() { - return hasCrypto && require('crypto').getFips(); - }, - get hasIPv6() { const iFaces = require('os').networkInterfaces(); let re; @@ -1047,10 +1021,6 @@ const common = { }); }, - get hasOpenSSL3() { - return hasOpenSSL(3); - }, - get inFreeBSDJail() { if (inFreeBSDJail !== null) return inFreeBSDJail; @@ -1100,28 +1070,6 @@ const common = { return localhostIPv4; }, - // opensslCli defined lazily to reduce overhead of spawnSync - get opensslCli() { - if (opensslCli !== null) return opensslCli; - - if (process.config.variables.node_shared_openssl) { - // Use external command - opensslCli = 'openssl'; - } else { - // Use command built from sources included in Node.js repository - opensslCli = path.join(path.dirname(process.execPath), 'openssl-cli'); - } - - if (exports.isWindows) opensslCli += '.exe'; - - const opensslCmd = spawnSync(opensslCli, ['version']); - if (opensslCmd.status !== 0 || opensslCmd.error !== undefined) { - // OpenSSL command cannot be executed - opensslCli = false; - } - return opensslCli; - }, - get PORT() { if (+process.env.TEST_PARALLEL) { throw new Error('common.PORT cannot be used in a parallelized test'); diff --git a/test/common/index.mjs b/test/common/index.mjs index b252f2dc4aac5e..23328ac90ea3c9 100644 --- a/test/common/index.mjs +++ b/test/common/index.mjs @@ -41,7 +41,6 @@ const { mustNotMutateObjectDeep, mustSucceed, nodeProcessAborted, - opensslCli, parseTestFlags, PIPE, platformTimeout, @@ -97,7 +96,6 @@ export { mustNotMutateObjectDeep, mustSucceed, nodeProcessAborted, - opensslCli, parseTestFlags, PIPE, platformTimeout, diff --git a/test/parallel/test-cli-node-options.js b/test/parallel/test-cli-node-options.js index 69bf136559c1a8..9e89200e9f6dfd 100644 --- a/test/parallel/test-cli-node-options.js +++ b/test/parallel/test-cli-node-options.js @@ -12,6 +12,7 @@ const { Worker } = require('worker_threads'); const fixtures = require('../common/fixtures'); const tmpdir = require('../common/tmpdir'); +const { hasOpenSSL3 } = require('../common/crypto'); tmpdir.refresh(); const printA = path.relative(tmpdir.path, fixtures.path('printA.js')); @@ -64,7 +65,7 @@ if (common.isLinux) { if (common.hasCrypto) { expectNoWorker('--use-openssl-ca', 'B\n'); expectNoWorker('--use-bundled-ca', 'B\n'); - if (!common.hasOpenSSL3) + if (!hasOpenSSL3) expectNoWorker('--openssl-config=_ossl_cfg', 'B\n'); } diff --git a/test/parallel/test-crypto-authenticated.js b/test/parallel/test-crypto-authenticated.js index d191ab7be2de20..181ea732b91281 100644 --- a/test/parallel/test-crypto-authenticated.js +++ b/test/parallel/test-crypto-authenticated.js @@ -21,13 +21,17 @@ // Flags: --no-warnings 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} const 
assert = require('assert'); const crypto = require('crypto'); const { inspect } = require('util'); const fixtures = require('../common/fixtures'); +const { hasOpenSSL3 } = require('../common/crypto'); + +const isFipsEnabled = crypto.getFips(); // // Test authenticated encryption modes. @@ -53,7 +57,7 @@ for (const test of TEST_CASES) { continue; } - if (common.hasFipsCrypto && test.iv.length < 24) { + if (isFipsEnabled && test.iv.length < 24) { common.printSkipMessage('IV len < 12 bytes unsupported in FIPS mode'); continue; } @@ -95,7 +99,7 @@ for (const test of TEST_CASES) { } { - if (isCCM && common.hasFipsCrypto) { + if (isCCM && isFipsEnabled) { assert.throws(() => { crypto.createDecipheriv(test.algo, Buffer.from(test.key, 'hex'), @@ -286,7 +290,7 @@ for (const test of TEST_CASES) { }); }, errMessages.authTagLength); - if (!common.hasFipsCrypto) { + if (!isFipsEnabled) { assert.throws(() => { crypto.createDecipheriv('aes-256-ccm', 'FxLKsqdmv0E9xrQhp0b1ZgI0K7JFZJM8', @@ -312,7 +316,7 @@ for (const test of TEST_CASES) { }); // CCM decryption and create(De|C)ipher are unsupported in FIPS mode. - if (!common.hasFipsCrypto) { + if (!isFipsEnabled) { assert.throws(() => { crypto.createDecipheriv(`aes-256-${mode}`, 'FxLKsqdmv0E9xrQhp0b1ZgI0K7JFZJM8', @@ -388,7 +392,7 @@ for (const test of TEST_CASES) { cipher.setAAD(Buffer.from('0123456789', 'hex')); }, /options\.plaintextLength required for CCM mode with AAD/); - if (!common.hasFipsCrypto) { + if (!isFipsEnabled) { assert.throws(() => { const cipher = crypto.createDecipheriv('aes-256-ccm', 'FxLKsqdmv0E9xrQhp0b1ZgI0K7JFZJM8', @@ -403,7 +407,7 @@ for (const test of TEST_CASES) { // Test that final() throws in CCM mode when no authentication tag is provided. { - if (!common.hasFipsCrypto) { + if (!isFipsEnabled) { const key = Buffer.from('1ed2233fa2223ef5d7df08546049406c', 'hex'); const iv = Buffer.from('7305220bca40d4c90e1791e9', 'hex'); const ct = Buffer.from('8beba09d4d4d861f957d51c0794f4abf8030848e', 'hex'); @@ -562,7 +566,7 @@ for (const test of TEST_CASES) { ]) { assert.throws(() => { cipher.final(); - }, common.hasOpenSSL3 ? { + }, hasOpenSSL3 ? { code: 'ERR_OSSL_TAG_NOT_SET' } : { message: /Unsupported state/ diff --git a/test/parallel/test-crypto-cipheriv-decipheriv.js b/test/parallel/test-crypto-cipheriv-decipheriv.js index 3e3632203af72c..88d07c3b957f57 100644 --- a/test/parallel/test-crypto-cipheriv-decipheriv.js +++ b/test/parallel/test-crypto-cipheriv-decipheriv.js @@ -5,6 +5,8 @@ if (!common.hasCrypto) const assert = require('assert'); const crypto = require('crypto'); +const { hasOpenSSL3 } = require('../common/crypto'); +const isFipsEnabled = crypto.getFips(); function testCipher1(key, iv) { // Test encryption and decryption with explicit key and iv @@ -150,7 +152,7 @@ testCipher1(Buffer.from('0123456789abcd0123456789'), '12345678'); testCipher1(Buffer.from('0123456789abcd0123456789'), Buffer.from('12345678')); testCipher2(Buffer.from('0123456789abcd0123456789'), Buffer.from('12345678')); -if (!common.hasFipsCrypto) { +if (!isFipsEnabled) { testCipher3(Buffer.from('000102030405060708090A0B0C0D0E0F', 'hex'), Buffer.from('A6A6A6A6A6A6A6A6', 'hex')); } @@ -193,10 +195,10 @@ assert.throws( errMessage); // But all other IV lengths should be accepted. -const minIvLength = common.hasOpenSSL3 ? 8 : 1; -const maxIvLength = common.hasOpenSSL3 ? 64 : 256; +const minIvLength = hasOpenSSL3 ? 8 : 1; +const maxIvLength = hasOpenSSL3 ? 
64 : 256; for (let n = minIvLength; n < maxIvLength; n += 1) { - if (common.hasFipsCrypto && n < 12) continue; + if (isFipsEnabled && n < 12) continue; crypto.createCipheriv('aes-128-gcm', Buffer.alloc(16), Buffer.alloc(n)); } diff --git a/test/parallel/test-crypto-classes.js b/test/parallel/test-crypto-classes.js index f736921476a1c5..429bc91d4412be 100644 --- a/test/parallel/test-crypto-classes.js +++ b/test/parallel/test-crypto-classes.js @@ -4,9 +4,9 @@ const assert = require('assert'); if (!common.hasCrypto) { common.skip('missing crypto'); - return; } const crypto = require('crypto'); +const { hasOpenSSL3 } = require('../common/crypto'); // 'ClassName' : ['args', 'for', 'constructor'] const TEST_CASES = { @@ -21,8 +21,8 @@ const TEST_CASES = { 'ECDH': ['prime256v1'], }; -if (!common.hasFipsCrypto) { - TEST_CASES.DiffieHellman = [common.hasOpenSSL3 ? 1024 : 256]; +if (!crypto.getFips()) { + TEST_CASES.DiffieHellman = [hasOpenSSL3 ? 1024 : 256]; } for (const [clazz, args] of Object.entries(TEST_CASES)) { diff --git a/test/parallel/test-crypto-dh-constructor.js b/test/parallel/test-crypto-dh-constructor.js index c7eaca29347a2b..eb8674932484ed 100644 --- a/test/parallel/test-crypto-dh-constructor.js +++ b/test/parallel/test-crypto-dh-constructor.js @@ -5,8 +5,9 @@ if (!common.hasCrypto) const assert = require('assert'); const crypto = require('crypto'); +const { hasOpenSSL3 } = require('../common/crypto'); -const size = common.hasFipsCrypto || common.hasOpenSSL3 ? 1024 : 256; +const size = crypto.getFips() || hasOpenSSL3 ? 1024 : 256; const dh1 = crypto.createDiffieHellman(size); const p1 = dh1.getPrime('buffer'); diff --git a/test/parallel/test-crypto-dh-errors.js b/test/parallel/test-crypto-dh-errors.js index 476ca64b4425b5..0af4db0310750c 100644 --- a/test/parallel/test-crypto-dh-errors.js +++ b/test/parallel/test-crypto-dh-errors.js @@ -5,6 +5,7 @@ if (!common.hasCrypto) const assert = require('assert'); const crypto = require('crypto'); +const { hasOpenSSL3 } = require('../common/crypto'); // https://github.com/nodejs/node/issues/32738 // XXX(bnoordhuis) validateInt32() throwing ERR_OUT_OF_RANGE and RangeError @@ -24,7 +25,7 @@ assert.throws(() => crypto.createDiffieHellman('abcdef', 13.37), { }); for (const bits of [-1, 0, 1]) { - if (common.hasOpenSSL3) { + if (hasOpenSSL3) { assert.throws(() => crypto.createDiffieHellman(bits), { code: 'ERR_OSSL_DH_MODULUS_TOO_SMALL', name: 'Error', diff --git a/test/parallel/test-crypto-dh-generate-keys.js b/test/parallel/test-crypto-dh-generate-keys.js index fc277bb0d9b8e4..e4598274328bd8 100644 --- a/test/parallel/test-crypto-dh-generate-keys.js +++ b/test/parallel/test-crypto-dh-generate-keys.js @@ -6,9 +6,10 @@ if (!common.hasCrypto) const assert = require('assert'); const crypto = require('crypto'); +const { hasOpenSSL3 } = require('../common/crypto'); { - const size = common.hasFipsCrypto || common.hasOpenSSL3 ? 1024 : 256; + const size = crypto.getFips() || hasOpenSSL3 ? 1024 : 256; function unlessInvalidState(f) { try { diff --git a/test/parallel/test-crypto-dh-leak.js b/test/parallel/test-crypto-dh-leak.js index 1998d61d4affd7..3b5051feb43cd8 100644 --- a/test/parallel/test-crypto-dh-leak.js +++ b/test/parallel/test-crypto-dh-leak.js @@ -9,10 +9,11 @@ if (common.isASan) const assert = require('assert'); const crypto = require('crypto'); +const { hasOpenSSL3 } = require('../common/crypto'); const before = process.memoryUsage.rss(); { - const size = common.hasFipsCrypto || common.hasOpenSSL3 ? 
1024 : 256; + const size = crypto.getFips() || hasOpenSSL3 ? 1024 : 256; const dh = crypto.createDiffieHellman(size); const publicKey = dh.generateKeys(); const privateKey = dh.getPrivateKey(); diff --git a/test/parallel/test-crypto-dh-odd-key.js b/test/parallel/test-crypto-dh-odd-key.js index 69a1eb56c866b3..fbe42be425ed1c 100644 --- a/test/parallel/test-crypto-dh-odd-key.js +++ b/test/parallel/test-crypto-dh-odd-key.js @@ -21,22 +21,24 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} const assert = require('assert'); const crypto = require('crypto'); +const { hasOpenSSL3 } = require('../common/crypto'); function test() { const odd = Buffer.alloc(39, 'A'); - const c = crypto.createDiffieHellman(common.hasOpenSSL3 ? 1024 : 32); + const c = crypto.createDiffieHellman(hasOpenSSL3 ? 1024 : 32); c.setPrivateKey(odd); c.generateKeys(); } // FIPS requires a length of at least 1024 -if (!common.hasFipsCrypto) { +if (!crypto.getFips()) { test(); } else { assert.throws(function() { test(); }, /key size too small/); diff --git a/test/parallel/test-crypto-dh-stateless.js b/test/parallel/test-crypto-dh-stateless.js index 2ccac322e23958..f4fc1849699e31 100644 --- a/test/parallel/test-crypto-dh-stateless.js +++ b/test/parallel/test-crypto-dh-stateless.js @@ -5,6 +5,7 @@ if (!common.hasCrypto) const assert = require('assert'); const crypto = require('crypto'); +const { hasOpenSSL3 } = require('../common/crypto'); assert.throws(() => crypto.diffieHellman(), { name: 'TypeError', @@ -150,7 +151,7 @@ const list = [ // TODO(danbev): Take a closer look if there should be a check in OpenSSL3 // when the dh parameters differ. -if (!common.hasOpenSSL3) { +if (!hasOpenSSL3) { // Same primes, but different generator. list.push([{ group: 'modp5' }, { prime: group.getPrime(), generator: 5 }]); // Same generator, but different primes. @@ -161,7 +162,7 @@ for (const [params1, params2] of list) { assert.throws(() => { test(crypto.generateKeyPairSync('dh', params1), crypto.generateKeyPairSync('dh', params2)); - }, common.hasOpenSSL3 ? { + }, hasOpenSSL3 ? { name: 'Error', code: 'ERR_OSSL_MISMATCHING_DOMAIN_PARAMETERS' } : { @@ -220,7 +221,7 @@ const not256k1 = crypto.getCurves().find((c) => /^sec.*(224|384|512)/.test(c)); assert.throws(() => { test(crypto.generateKeyPairSync('ec', { namedCurve: 'secp256k1' }), crypto.generateKeyPairSync('ec', { namedCurve: not256k1 })); -}, common.hasOpenSSL3 ? { +}, hasOpenSSL3 ? { name: 'Error', code: 'ERR_OSSL_MISMATCHING_DOMAIN_PARAMETERS' } : { diff --git a/test/parallel/test-crypto-dh.js b/test/parallel/test-crypto-dh.js index 9ebe14011eed22..d7ffbe5eca9273 100644 --- a/test/parallel/test-crypto-dh.js +++ b/test/parallel/test-crypto-dh.js @@ -1,13 +1,18 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} const assert = require('assert'); const crypto = require('crypto'); +const { + hasOpenSSL3, + hasOpenSSL, +} = require('../common/crypto'); { - const size = common.hasFipsCrypto || common.hasOpenSSL3 ? 1024 : 256; + const size = crypto.getFips() || hasOpenSSL3 ? 
1024 : 256; const dh1 = crypto.createDiffieHellman(size); const p1 = dh1.getPrime('buffer'); const dh2 = crypto.createDiffieHellman(p1, 'buffer'); @@ -53,7 +58,7 @@ const crypto = require('crypto'); assert.strictEqual(secret1, secret4); let wrongBlockLength; - if (common.hasOpenSSL3) { + if (hasOpenSSL3) { wrongBlockLength = { message: 'error:1C80006B:Provider routines::wrong final block length', code: 'ERR_OSSL_WRONG_FINAL_BLOCK_LENGTH', @@ -87,11 +92,11 @@ const crypto = require('crypto'); { // Error message was changed in OpenSSL 3.0.x from 3.0.12, and 3.1.x from 3.1.4. - const hasOpenSSL3WithNewErrorMessage = (common.hasOpenSSL(3, 0, 12) && !common.hasOpenSSL(3, 1, 0)) || - (common.hasOpenSSL(3, 1, 4)); + const hasOpenSSL3WithNewErrorMessage = (hasOpenSSL(3, 0, 12) && !hasOpenSSL(3, 1, 0)) || + (hasOpenSSL(3, 1, 4)); assert.throws(() => { dh3.computeSecret(''); - }, { message: common.hasOpenSSL3 && !hasOpenSSL3WithNewErrorMessage ? + }, { message: hasOpenSSL3 && !hasOpenSSL3WithNewErrorMessage ? 'Unspecified validation error' : 'Supplied key is too small' }); } diff --git a/test/parallel/test-crypto-ecb.js b/test/parallel/test-crypto-ecb.js index aecd858ef3bf1e..6439c9354a059e 100644 --- a/test/parallel/test-crypto-ecb.js +++ b/test/parallel/test-crypto-ecb.js @@ -21,18 +21,23 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} + +const { hasOpenSSL3 } = require('../common/crypto'); +const crypto = require('crypto'); -if (common.hasFipsCrypto) +if (crypto.getFips()) { common.skip('BF-ECB is not FIPS 140-2 compatible'); +} -if (common.hasOpenSSL3) +if (hasOpenSSL3) { common.skip('Blowfish is only available with the legacy provider in ' + 'OpenSSl 3.x'); +} const assert = require('assert'); -const crypto = require('crypto'); // Testing whether EVP_CipherInit_ex is functioning correctly. // Reference: bug#1997 diff --git a/test/parallel/test-crypto-fips.js b/test/parallel/test-crypto-fips.js index 8a8a8089a3cf3b..de004b9a9e8f23 100644 --- a/test/parallel/test-crypto-fips.js +++ b/test/parallel/test-crypto-fips.js @@ -10,6 +10,7 @@ const path = require('path'); const fixtures = require('../common/fixtures'); const { internalBinding } = require('internal/test/binding'); const { testFipsCrypto } = internalBinding('crypto'); +const { hasOpenSSL3 } = require('../common/crypto'); const FIPS_ENABLED = 1; const FIPS_DISABLED = 0; @@ -114,7 +115,7 @@ assert.ok(test_result === 1 || test_result === 0); // ("Error: Cannot set FIPS mode in a non-FIPS build."). // Due to this uncertainty the following tests are skipped when configured // with --shared-openssl. -if (!sharedOpenSSL() && !common.hasOpenSSL3) { +if (!sharedOpenSSL() && !hasOpenSSL3) { // OpenSSL config file should be able to turn on FIPS mode testHelper( 'stdout', @@ -144,7 +145,7 @@ if (!sharedOpenSSL() && !common.hasOpenSSL3) { // will not work as expected with that version. // TODO(danbev) Revisit these test once FIPS support is available in // OpenSSL 3.x. 
-if (!common.hasOpenSSL3) { +if (!hasOpenSSL3) { testHelper( 'stdout', [`--openssl-config=${CNF_FIPS_OFF}`], diff --git a/test/parallel/test-crypto-hash.js b/test/parallel/test-crypto-hash.js index ca8f630b4bb7e7..61145aee0727fb 100644 --- a/test/parallel/test-crypto-hash.js +++ b/test/parallel/test-crypto-hash.js @@ -1,13 +1,14 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} const assert = require('assert'); const crypto = require('crypto'); const fs = require('fs'); -const { hasOpenSSL } = common; +const { hasOpenSSL } = require('../common/crypto'); const fixtures = require('../common/fixtures'); let cryptoType; @@ -40,7 +41,7 @@ a8.write(''); a8.end(); a8 = a8.read(); -if (!common.hasFipsCrypto) { +if (!crypto.getFips()) { cryptoType = 'md5'; digest = 'latin1'; const a0 = crypto.createHash(cryptoType).update('Test123').digest(digest); diff --git a/test/parallel/test-crypto-hkdf.js b/test/parallel/test-crypto-hkdf.js index ff3abdf291efcd..3f7e61e9b2ebc0 100644 --- a/test/parallel/test-crypto-hkdf.js +++ b/test/parallel/test-crypto-hkdf.js @@ -13,6 +13,7 @@ const { hkdfSync, getHashes } = require('crypto'); +const { hasOpenSSL3 } = require('../common/crypto'); { assert.throws(() => hkdf(), { @@ -124,7 +125,7 @@ const algorithms = [ ['sha256', '', 'salt', '', 10], ['sha512', 'secret', 'salt', '', 15], ]; -if (!common.hasOpenSSL3) +if (!hasOpenSSL3) algorithms.push(['whirlpool', 'secret', '', 'info', 20]); algorithms.forEach(([ hash, secret, salt, info, length ]) => { @@ -215,7 +216,7 @@ algorithms.forEach(([ hash, secret, salt, info, length ]) => { }); -if (!common.hasOpenSSL3) { +if (!hasOpenSSL3) { const kKnownUnsupported = ['shake128', 'shake256']; getHashes() .filter((hash) => !kKnownUnsupported.includes(hash)) diff --git a/test/parallel/test-crypto-hmac.js b/test/parallel/test-crypto-hmac.js index 62a6ac18d25265..afb5f74cbf076b 100644 --- a/test/parallel/test-crypto-hmac.js +++ b/test/parallel/test-crypto-hmac.js @@ -1,7 +1,8 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} const assert = require('assert'); const crypto = require('crypto'); @@ -40,7 +41,7 @@ assert.throws( function testHmac(algo, key, data, expected) { // FIPS does not support MD5. - if (common.hasFipsCrypto && algo === 'md5') + if (crypto.getFips() && algo === 'md5') return; if (!Array.isArray(data)) diff --git a/test/parallel/test-crypto-key-objects.js b/test/parallel/test-crypto-key-objects.js index f5271f16d346c0..0c516d80950694 100644 --- a/test/parallel/test-crypto-key-objects.js +++ b/test/parallel/test-crypto-key-objects.js @@ -24,6 +24,8 @@ const { generateKeyPairSync, } = require('crypto'); +const { hasOpenSSL3 } = require('../common/crypto'); + const fixtures = require('../common/fixtures'); const publicPem = fixtures.readKey('rsa_public.pem', 'ascii'); @@ -297,7 +299,7 @@ const privateDsa = fixtures.readKey('dsa_private_encrypted_1025.pem', // This should not cause a crash: https://github.com/nodejs/node/issues/25247 assert.throws(() => { createPrivateKey({ key: '' }); - }, common.hasOpenSSL3 ? { + }, hasOpenSSL3 ? 
{ message: 'error:1E08010C:DECODER routines::unsupported', } : { message: 'error:0909006C:PEM routines:get_name:no start line', @@ -323,7 +325,7 @@ const privateDsa = fixtures.readKey('dsa_private_encrypted_1025.pem', type: 'pkcs1' }); createPrivateKey({ key, format: 'der', type: 'pkcs1' }); - }, common.hasOpenSSL3 ? { + }, hasOpenSSL3 ? { message: /error:1E08010C:DECODER routines::unsupported/, library: 'DECODER routines' } : { @@ -510,7 +512,7 @@ const privateDsa = fixtures.readKey('dsa_private_encrypted_1025.pem', { // Reading an encrypted key without a passphrase should fail. - assert.throws(() => createPrivateKey(privateDsa), common.hasOpenSSL3 ? { + assert.throws(() => createPrivateKey(privateDsa), hasOpenSSL3 ? { name: 'Error', message: 'error:07880109:common libcrypto routines::interrupted or ' + 'cancelled', @@ -526,7 +528,7 @@ const privateDsa = fixtures.readKey('dsa_private_encrypted_1025.pem', key: privateDsa, format: 'pem', passphrase: Buffer.alloc(1025, 'a') - }), common.hasOpenSSL3 ? { name: 'Error' } : { + }), hasOpenSSL3 ? { name: 'Error' } : { code: 'ERR_OSSL_PEM_BAD_PASSWORD_READ', name: 'Error' }); diff --git a/test/parallel/test-crypto-keygen-async-dsa-key-object.js b/test/parallel/test-crypto-keygen-async-dsa-key-object.js index c15807295541e2..a3df136230d0f8 100644 --- a/test/parallel/test-crypto-keygen-async-dsa-key-object.js +++ b/test/parallel/test-crypto-keygen-async-dsa-key-object.js @@ -9,23 +9,25 @@ const { generateKeyPair, } = require('crypto'); +const { hasOpenSSL3 } = require('../common/crypto'); + // Test async DSA key object generation. { generateKeyPair('dsa', { - modulusLength: common.hasOpenSSL3 ? 2048 : 512, + modulusLength: hasOpenSSL3 ? 2048 : 512, divisorLength: 256 }, common.mustSucceed((publicKey, privateKey) => { assert.strictEqual(publicKey.type, 'public'); assert.strictEqual(publicKey.asymmetricKeyType, 'dsa'); assert.deepStrictEqual(publicKey.asymmetricKeyDetails, { - modulusLength: common.hasOpenSSL3 ? 2048 : 512, + modulusLength: hasOpenSSL3 ? 2048 : 512, divisorLength: 256 }); assert.strictEqual(privateKey.type, 'private'); assert.strictEqual(privateKey.asymmetricKeyType, 'dsa'); assert.deepStrictEqual(privateKey.asymmetricKeyDetails, { - modulusLength: common.hasOpenSSL3 ? 2048 : 512, + modulusLength: hasOpenSSL3 ? 2048 : 512, divisorLength: 256 }); })); diff --git a/test/parallel/test-crypto-keygen-async-dsa.js b/test/parallel/test-crypto-keygen-async-dsa.js index 048c0ce6fb92ef..41968d8cc23365 100644 --- a/test/parallel/test-crypto-keygen-async-dsa.js +++ b/test/parallel/test-crypto-keygen-async-dsa.js @@ -14,6 +14,8 @@ const { spkiExp, } = require('../common/crypto'); +const { hasOpenSSL3 } = require('../common/crypto'); + // Test async DSA key generation. { const privateKeyEncoding = { @@ -22,7 +24,7 @@ const { }; generateKeyPair('dsa', { - modulusLength: common.hasOpenSSL3 ? 2048 : 512, + modulusLength: hasOpenSSL3 ? 2048 : 512, divisorLength: 256, publicKeyEncoding: { type: 'spki', @@ -39,8 +41,8 @@ const { // The private key is DER-encoded. assert(Buffer.isBuffer(privateKeyDER)); - assertApproximateSize(publicKey, common.hasOpenSSL3 ? 1194 : 440); - assertApproximateSize(privateKeyDER, common.hasOpenSSL3 ? 721 : 336); + assertApproximateSize(publicKey, hasOpenSSL3 ? 1194 : 440); + assertApproximateSize(privateKeyDER, hasOpenSSL3 ? 721 : 336); // Since the private key is encrypted, signing shouldn't work anymore. 
assert.throws(() => { diff --git a/test/parallel/test-crypto-keygen-async-explicit-elliptic-curve-encrypted-p256.js b/test/parallel/test-crypto-keygen-async-explicit-elliptic-curve-encrypted-p256.js index 553674774571d3..55aa3831c4233b 100644 --- a/test/parallel/test-crypto-keygen-async-explicit-elliptic-curve-encrypted-p256.js +++ b/test/parallel/test-crypto-keygen-async-explicit-elliptic-curve-encrypted-p256.js @@ -14,6 +14,8 @@ const { pkcs8EncExp, } = require('../common/crypto'); +const { hasOpenSSL3 } = require('../common/crypto'); + // Test async elliptic curve key generation, e.g. for ECDSA, with an encrypted // private key with paramEncoding explicit. { @@ -38,7 +40,7 @@ const { // Since the private key is encrypted, signing shouldn't work anymore. assert.throws(() => testSignVerify(publicKey, privateKey), - common.hasOpenSSL3 ? { + hasOpenSSL3 ? { message: 'error:07880109:common libcrypto ' + 'routines::interrupted or cancelled' } : { diff --git a/test/parallel/test-crypto-keygen-async-explicit-elliptic-curve-encrypted.js.js b/test/parallel/test-crypto-keygen-async-explicit-elliptic-curve-encrypted.js.js index 79a132eed0b854..8a55d4338bc72f 100644 --- a/test/parallel/test-crypto-keygen-async-explicit-elliptic-curve-encrypted.js.js +++ b/test/parallel/test-crypto-keygen-async-explicit-elliptic-curve-encrypted.js.js @@ -12,6 +12,7 @@ const { testSignVerify, spkiExp, sec1EncExp, + hasOpenSSL3, } = require('../common/crypto'); { @@ -38,7 +39,7 @@ const { // Since the private key is encrypted, signing shouldn't work anymore. assert.throws(() => testSignVerify(publicKey, privateKey), - common.hasOpenSSL3 ? { + hasOpenSSL3 ? { message: 'error:07880109:common libcrypto ' + 'routines::interrupted or cancelled' } : { diff --git a/test/parallel/test-crypto-keygen-async-named-elliptic-curve-encrypted-p256.js b/test/parallel/test-crypto-keygen-async-named-elliptic-curve-encrypted-p256.js index 5e7d1a6c9b6611..4c11401d0fc516 100644 --- a/test/parallel/test-crypto-keygen-async-named-elliptic-curve-encrypted-p256.js +++ b/test/parallel/test-crypto-keygen-async-named-elliptic-curve-encrypted-p256.js @@ -12,6 +12,7 @@ const { testSignVerify, spkiExp, pkcs8EncExp, + hasOpenSSL3, } = require('../common/crypto'); // Test async elliptic curve key generation, e.g. for ECDSA, with an encrypted @@ -38,7 +39,7 @@ const { // Since the private key is encrypted, signing shouldn't work anymore. assert.throws(() => testSignVerify(publicKey, privateKey), - common.hasOpenSSL3 ? { + hasOpenSSL3 ? { message: 'error:07880109:common libcrypto ' + 'routines::interrupted or cancelled' } : { diff --git a/test/parallel/test-crypto-keygen-async-named-elliptic-curve-encrypted.js b/test/parallel/test-crypto-keygen-async-named-elliptic-curve-encrypted.js index 1cc93d0a794931..0503ff74787f37 100644 --- a/test/parallel/test-crypto-keygen-async-named-elliptic-curve-encrypted.js +++ b/test/parallel/test-crypto-keygen-async-named-elliptic-curve-encrypted.js @@ -12,6 +12,7 @@ const { testSignVerify, spkiExp, sec1EncExp, + hasOpenSSL3, } = require('../common/crypto'); { @@ -38,7 +39,7 @@ const { // Since the private key is encrypted, signing shouldn't work anymore. assert.throws(() => testSignVerify(publicKey, privateKey), - common.hasOpenSSL3 ? { + hasOpenSSL3 ? 
{ message: 'error:07880109:common libcrypto ' + 'routines::interrupted or cancelled' } : { diff --git a/test/parallel/test-crypto-keygen-async-rsa.js b/test/parallel/test-crypto-keygen-async-rsa.js index f4a83809dc73c7..c80d7d33492923 100644 --- a/test/parallel/test-crypto-keygen-async-rsa.js +++ b/test/parallel/test-crypto-keygen-async-rsa.js @@ -13,6 +13,7 @@ const { testEncryptDecrypt, testSignVerify, pkcs1EncExp, + hasOpenSSL3, } = require('../common/crypto'); // Test async RSA key generation with an encrypted private key. @@ -43,7 +44,7 @@ const { type: 'pkcs1', format: 'der', }; - const expectedError = common.hasOpenSSL3 ? { + const expectedError = hasOpenSSL3 ? { name: 'Error', message: 'error:07880109:common libcrypto routines::interrupted or ' + 'cancelled' diff --git a/test/parallel/test-crypto-keygen-bit-length.js b/test/parallel/test-crypto-keygen-bit-length.js index 08772ba2e496b8..63a80659bb2f53 100644 --- a/test/parallel/test-crypto-keygen-bit-length.js +++ b/test/parallel/test-crypto-keygen-bit-length.js @@ -8,6 +8,7 @@ const assert = require('assert'); const { generateKeyPair, } = require('crypto'); +const { hasOpenSSL3 } = require('../common/crypto'); // This tests check that generateKeyPair returns correct bit length in // KeyObject's asymmetricKeyDetails. @@ -27,7 +28,7 @@ const { assert.strictEqual(publicKey.asymmetricKeyDetails.modulusLength, 513); })); - if (common.hasOpenSSL3) { + if (hasOpenSSL3) { generateKeyPair('dsa', { modulusLength: 2049, divisorLength: 256, diff --git a/test/parallel/test-crypto-keygen-empty-passphrase-no-prompt.js b/test/parallel/test-crypto-keygen-empty-passphrase-no-prompt.js index 7679a492c3194c..cb873ff04748b7 100644 --- a/test/parallel/test-crypto-keygen-empty-passphrase-no-prompt.js +++ b/test/parallel/test-crypto-keygen-empty-passphrase-no-prompt.js @@ -11,6 +11,7 @@ const { } = require('crypto'); const { testSignVerify, + hasOpenSSL3, } = require('../common/crypto'); // Passing an empty passphrase string should not cause OpenSSL's default @@ -40,7 +41,7 @@ for (const type of ['pkcs1', 'pkcs8']) { // the key, and not specifying a passphrase should fail when decoding it. assert.throws(() => { return testSignVerify(publicKey, privateKey); - }, common.hasOpenSSL3 ? { + }, hasOpenSSL3 ? { name: 'Error', code: 'ERR_OSSL_CRYPTO_INTERRUPTED_OR_CANCELLED', message: 'error:07880109:common libcrypto routines::interrupted or cancelled' diff --git a/test/parallel/test-crypto-keygen-missing-oid.js b/test/parallel/test-crypto-keygen-missing-oid.js index f7fefe13848d4b..1e4f309292eb47 100644 --- a/test/parallel/test-crypto-keygen-missing-oid.js +++ b/test/parallel/test-crypto-keygen-missing-oid.js @@ -11,6 +11,8 @@ const { getCurves, } = require('crypto'); +const { hasOpenSSL3 } = require('../common/crypto'); + // This test creates EC key pairs on curves without associated OIDs. // Specifying a key encoding should not crash. { @@ -20,7 +22,7 @@ const { continue; const expectedErrorCode = - common.hasOpenSSL3 ? 'ERR_OSSL_MISSING_OID' : 'ERR_OSSL_EC_MISSING_OID'; + hasOpenSSL3 ? 
'ERR_OSSL_MISSING_OID' : 'ERR_OSSL_EC_MISSING_OID'; const params = { namedCurve, publicKeyEncoding: { diff --git a/test/parallel/test-crypto-keygen.js b/test/parallel/test-crypto-keygen.js index b09ca9e7c531ea..edaee845075668 100644 --- a/test/parallel/test-crypto-keygen.js +++ b/test/parallel/test-crypto-keygen.js @@ -14,6 +14,7 @@ const { } = require('crypto'); const { inspect } = require('util'); +const { hasOpenSSL3 } = require('../common/crypto'); // Test invalid parameter encoding. { @@ -351,7 +352,7 @@ const { inspect } = require('util'); publicExponent }, common.mustCall((err) => { assert.strictEqual(err.name, 'Error'); - assert.match(err.message, common.hasOpenSSL3 ? /exponent/ : /bad e value/); + assert.match(err.message, hasOpenSSL3 ? /exponent/ : /bad e value/); })); } } diff --git a/test/parallel/test-crypto-no-algorithm.js b/test/parallel/test-crypto-no-algorithm.js index 37db38cf613b65..06124e3d465e41 100644 --- a/test/parallel/test-crypto-no-algorithm.js +++ b/test/parallel/test-crypto-no-algorithm.js @@ -4,7 +4,9 @@ const common = require('../common'); if (!common.hasCrypto) common.skip('missing crypto'); -if (!common.hasOpenSSL3) +const { hasOpenSSL3 } = require('../common/crypto'); + +if (!hasOpenSSL3) common.skip('this test requires OpenSSL 3.x'); const assert = require('node:assert/strict'); diff --git a/test/parallel/test-crypto-oneshot-hash.js b/test/parallel/test-crypto-oneshot-hash.js index 69051c43d9e882..861aded5dd3f60 100644 --- a/test/parallel/test-crypto-oneshot-hash.js +++ b/test/parallel/test-crypto-oneshot-hash.js @@ -8,6 +8,7 @@ if (!common.hasCrypto) const assert = require('assert'); const crypto = require('crypto'); const fixtures = require('../common/fixtures'); +const { hasOpenSSL } = require('../common/crypto'); const fs = require('fs'); // Test errors for invalid arguments. @@ -32,7 +33,7 @@ const input = fs.readFileSync(fixtures.path('utf8_test_text.txt')); for (const method of methods) { // Skip failing tests on OpenSSL 3.4.0 - if (method.startsWith('shake') && common.hasOpenSSL(3, 4)) + if (method.startsWith('shake') && hasOpenSSL(3, 4)) continue; for (const outputEncoding of ['buffer', 'hex', 'base64', undefined]) { const oldDigest = crypto.createHash(method).update(input).digest(outputEncoding || 'hex'); diff --git a/test/parallel/test-crypto-padding.js b/test/parallel/test-crypto-padding.js index f1f14b472997e7..48cd1ed4df61aa 100644 --- a/test/parallel/test-crypto-padding.js +++ b/test/parallel/test-crypto-padding.js @@ -26,6 +26,7 @@ if (!common.hasCrypto) const assert = require('assert'); const crypto = require('crypto'); +const { hasOpenSSL3 } = require('../common/crypto'); // Input data. const ODD_LENGTH_PLAIN = 'Hello node world!'; @@ -82,7 +83,7 @@ assert.strictEqual(enc(EVEN_LENGTH_PLAIN, true), EVEN_LENGTH_ENCRYPTED); assert.throws(function() { // Input must have block length %. enc(ODD_LENGTH_PLAIN, false); -}, common.hasOpenSSL3 ? { +}, hasOpenSSL3 ? { message: 'error:1C80006B:Provider routines::wrong final block length', code: 'ERR_OSSL_WRONG_FINAL_BLOCK_LENGTH', reason: 'wrong final block length', @@ -109,7 +110,7 @@ assert.strictEqual(dec(EVEN_LENGTH_ENCRYPTED, false).length, 48); assert.throws(function() { // Must have at least 1 byte of padding (PKCS): assert.strictEqual(dec(EVEN_LENGTH_ENCRYPTED_NOPAD, true), EVEN_LENGTH_PLAIN); -}, common.hasOpenSSL3 ? { +}, hasOpenSSL3 ? 
{ message: 'error:1C800064:Provider routines::bad decrypt', reason: 'bad decrypt', code: 'ERR_OSSL_BAD_DECRYPT', diff --git a/test/parallel/test-crypto-pbkdf2.js b/test/parallel/test-crypto-pbkdf2.js index 1f8e6a81f300e7..efd8d6eaf0d640 100644 --- a/test/parallel/test-crypto-pbkdf2.js +++ b/test/parallel/test-crypto-pbkdf2.js @@ -5,6 +5,7 @@ if (!common.hasCrypto) const assert = require('assert'); const crypto = require('crypto'); +const { hasOpenSSL3 } = require('../common/crypto'); function runPBKDF2(password, salt, iterations, keylen, hash) { const syncResult = @@ -219,7 +220,7 @@ assert.throws( } ); -if (!common.hasOpenSSL3) { +if (!hasOpenSSL3) { const kNotPBKDF2Supported = ['shake128', 'shake256']; crypto.getHashes() .filter((hash) => !kNotPBKDF2Supported.includes(hash)) diff --git a/test/parallel/test-crypto-private-decrypt-gh32240.js b/test/parallel/test-crypto-private-decrypt-gh32240.js index e88227a215ba4f..1ff5b565d6d5f4 100644 --- a/test/parallel/test-crypto-private-decrypt-gh32240.js +++ b/test/parallel/test-crypto-private-decrypt-gh32240.js @@ -14,6 +14,8 @@ const { privateDecrypt, } = require('crypto'); +const { hasOpenSSL3 } = require('../common/crypto'); + const pair = generateKeyPairSync('rsa', { modulusLength: 512 }); const expected = Buffer.from('shibboleth'); @@ -34,7 +36,7 @@ function decrypt(key) { } decrypt(pkey); -assert.throws(() => decrypt(pkeyEncrypted), common.hasOpenSSL3 ? +assert.throws(() => decrypt(pkeyEncrypted), hasOpenSSL3 ? { message: 'error:07880109:common libcrypto routines::interrupted or ' + 'cancelled' } : { code: 'ERR_MISSING_PASSPHRASE' }); diff --git a/test/parallel/test-crypto-publicDecrypt-fails-first-time.js b/test/parallel/test-crypto-publicDecrypt-fails-first-time.js index a60b87dbf65229..1d64e08920c63b 100644 --- a/test/parallel/test-crypto-publicDecrypt-fails-first-time.js +++ b/test/parallel/test-crypto-publicDecrypt-fails-first-time.js @@ -3,11 +3,15 @@ const common = require('../common'); // Test for https://github.com/nodejs/node/issues/40814 -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} -if (!common.hasOpenSSL3) +const { hasOpenSSL3 } = require('../common/crypto'); + +if (!hasOpenSSL3) { common.skip('only openssl3'); // https://github.com/nodejs/node/pull/42793#issuecomment-1107491901 +} const assert = require('assert'); const crypto = require('crypto'); diff --git a/test/parallel/test-crypto-rsa-dsa.js b/test/parallel/test-crypto-rsa-dsa.js index 5f4fafdfffbf72..dcd5045daaf58c 100644 --- a/test/parallel/test-crypto-rsa-dsa.js +++ b/test/parallel/test-crypto-rsa-dsa.js @@ -9,6 +9,7 @@ const crypto = require('crypto'); const constants = crypto.constants; const fixtures = require('../common/fixtures'); +const { hasOpenSSL3 } = require('../common/crypto'); // Test certificates const certPem = fixtures.readKey('rsa_cert.crt'); @@ -36,11 +37,11 @@ const openssl1DecryptError = { library: 'digital envelope routines', }; -const decryptError = common.hasOpenSSL3 ? +const decryptError = hasOpenSSL3 ? { message: 'error:1C800064:Provider routines::bad decrypt' } : openssl1DecryptError; -const decryptPrivateKeyError = common.hasOpenSSL3 ? { +const decryptPrivateKeyError = hasOpenSSL3 ? { message: 'error:1C800064:Provider routines::bad decrypt', } : openssl1DecryptError; @@ -146,7 +147,7 @@ function getBufferCopy(buf) { // Now with RSA_NO_PADDING. Plaintext needs to match key size. // OpenSSL 3.x has a rsa_check_padding that will cause an error if // RSA_NO_PADDING is used. 
- if (!common.hasOpenSSL3) { + if (!hasOpenSSL3) { { const plaintext = 'x'.repeat(rsaKeySize / 8); encryptedBuffer = crypto.privateEncrypt({ diff --git a/test/parallel/test-crypto-secure-heap.js b/test/parallel/test-crypto-secure-heap.js index 0e5788f00e4a44..c20b01a91a9840 100644 --- a/test/parallel/test-crypto-secure-heap.js +++ b/test/parallel/test-crypto-secure-heap.js @@ -1,21 +1,26 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} -if (common.isWindows) +if (common.isWindows) { common.skip('Not supported on Windows'); +} -if (common.isASan) +if (common.isASan) { common.skip('ASan does not play well with secure heap allocations'); +} const assert = require('assert'); const { fork } = require('child_process'); const fixtures = require('../common/fixtures'); +const { hasOpenSSL3 } = require('../common/crypto'); const { secureHeapUsed, createDiffieHellman, + getFips, } = require('crypto'); if (process.argv[2] === 'child') { @@ -29,7 +34,7 @@ if (process.argv[2] === 'child') { assert.strictEqual(a.used, 0); { - const size = common.hasFipsCrypto || common.hasOpenSSL3 ? 1024 : 256; + const size = getFips() || hasOpenSSL3 ? 1024 : 256; const dh1 = createDiffieHellman(size); const p1 = dh1.getPrime('buffer'); const dh2 = createDiffieHellman(p1, 'buffer'); diff --git a/test/parallel/test-crypto-sign-verify.js b/test/parallel/test-crypto-sign-verify.js index 8a263ec3350f55..0589d60736e377 100644 --- a/test/parallel/test-crypto-sign-verify.js +++ b/test/parallel/test-crypto-sign-verify.js @@ -8,6 +8,10 @@ const fs = require('fs'); const exec = require('child_process').exec; const crypto = require('crypto'); const fixtures = require('../common/fixtures'); +const { + hasOpenSSL3, + opensslCli, +} = require('../common/crypto'); // Test certificates const certPem = fixtures.readKey('rsa_cert.crt'); @@ -62,7 +66,7 @@ const keySize = 2048; key: keyPem, padding: crypto.constants.RSA_PKCS1_OAEP_PADDING }); - }, { message: common.hasOpenSSL3 ? + }, { message: hasOpenSSL3 ? 'error:1C8000A5:Provider routines::illegal or unsupported padding mode' : 'bye, bye, error stack' }); @@ -340,7 +344,7 @@ assert.throws( key: keyPem, padding: crypto.constants.RSA_PKCS1_OAEP_PADDING }); - }, common.hasOpenSSL3 ? { + }, hasOpenSSL3 ? 
{ code: 'ERR_OSSL_ILLEGAL_OR_UNSUPPORTED_PADDING_MODE', message: /illegal or unsupported padding mode/, } : { @@ -599,8 +603,9 @@ assert.throws( // Note: this particular test *must* be the last in this file as it will exit // early if no openssl binary is found { - if (!common.opensslCli) + if (!opensslCli) { common.skip('node compiled without OpenSSL CLI.'); + } const pubfile = fixtures.path('keys', 'rsa_public_2048.pem'); const privkey = fixtures.readKey('rsa_private_2048.pem'); @@ -622,7 +627,7 @@ assert.throws( fs.writeFileSync(msgfile, msg); exec(...common.escapePOSIXShell`"${ - common.opensslCli}" dgst -sha256 -verify "${pubfile}" -signature "${ + opensslCli}" dgst -sha256 -verify "${pubfile}" -signature "${ sigfile}" -sigopt rsa_padding_mode:pss -sigopt rsa_pss_saltlen:-2 "${msgfile }"`, common.mustCall((err, stdout, stderr) => { assert(stdout.includes('Verified OK')); diff --git a/test/parallel/test-crypto-stream.js b/test/parallel/test-crypto-stream.js index 008ab129f0e019..62be4eaf6edfb0 100644 --- a/test/parallel/test-crypto-stream.js +++ b/test/parallel/test-crypto-stream.js @@ -21,14 +21,16 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} const assert = require('assert'); const stream = require('stream'); const crypto = require('crypto'); +const { hasOpenSSL3 } = require('../common/crypto'); -if (!common.hasFipsCrypto) { +if (!crypto.getFips()) { // Small stream to buffer converter class Stream2buffer extends stream.Writable { constructor(callback) { @@ -71,7 +73,7 @@ const cipher = crypto.createCipheriv('aes-128-cbc', key, iv); const decipher = crypto.createDecipheriv('aes-128-cbc', badkey, iv); cipher.pipe(decipher) - .on('error', common.expectsError(common.hasOpenSSL3 ? { + .on('error', common.expectsError(hasOpenSSL3 ? { message: /bad decrypt/, library: 'Provider routines', reason: 'bad decrypt', diff --git a/test/parallel/test-crypto-x509.js b/test/parallel/test-crypto-x509.js index bd906c25b9ee19..f75e1d63470bfb 100644 --- a/test/parallel/test-crypto-x509.js +++ b/test/parallel/test-crypto-x509.js @@ -18,6 +18,7 @@ const { const assert = require('assert'); const fixtures = require('../common/fixtures'); +const { hasOpenSSL3 } = require('../common/crypto'); const { readFileSync } = require('fs'); const cert = readFileSync(fixtures.path('keys', 'agent1-cert.pem')); @@ -50,7 +51,7 @@ emailAddress=ry@tinyclouds.org`; let infoAccessCheck = `OCSP - URI:http://ocsp.nodejs.org/ CA Issuers - URI:http://ca.nodejs.org/ca.cert`; -if (!common.hasOpenSSL3) +if (!hasOpenSSL3) infoAccessCheck += '\n'; const der = Buffer.from( @@ -357,7 +358,7 @@ UcXd/5qu2GhokrKU2cPttU+XAN2Om6a0 const cert = new X509Certificate(certPem); assert.throws(() => cert.publicKey, { - message: common.hasOpenSSL3 ? /decode error/ : /wrong tag/, + message: hasOpenSSL3 ? 
/decode error/ : /wrong tag/, name: 'Error' }); diff --git a/test/parallel/test-crypto.js b/test/parallel/test-crypto.js index 4271121881379b..93644e016de447 100644 --- a/test/parallel/test-crypto.js +++ b/test/parallel/test-crypto.js @@ -29,6 +29,7 @@ const assert = require('assert'); const crypto = require('crypto'); const tls = require('tls'); const fixtures = require('../common/fixtures'); +const { hasOpenSSL3 } = require('../common/crypto'); // Test Certificates const certPfx = fixtures.readKey('rsa_cert.pfx'); @@ -208,9 +209,9 @@ assert.throws(() => { ].join('\n'); crypto.createSign('SHA256').update('test').sign(priv); }, (err) => { - if (!common.hasOpenSSL3) + if (!hasOpenSSL3) assert.ok(!('opensslErrorStack' in err)); - assert.throws(() => { throw err; }, common.hasOpenSSL3 ? { + assert.throws(() => { throw err; }, hasOpenSSL3 ? { name: 'Error', message: 'error:02000070:rsa routines::digest too big for rsa key', library: 'rsa routines', @@ -225,7 +226,7 @@ assert.throws(() => { return true; }); -if (!common.hasOpenSSL3) { +if (!hasOpenSSL3) { assert.throws(() => { // The correct header inside `rsa_private_pkcs8_bad.pem` should have been // -----BEGIN PRIVATE KEY----- and -----END PRIVATE KEY----- diff --git a/test/parallel/test-dsa-fips-invalid-key.js b/test/parallel/test-dsa-fips-invalid-key.js index 05cc1d143aca6e..3df51bfbed3517 100644 --- a/test/parallel/test-dsa-fips-invalid-key.js +++ b/test/parallel/test-dsa-fips-invalid-key.js @@ -1,12 +1,18 @@ 'use strict'; const common = require('../common'); + +if (!common.hasCrypto) { + common.skip('no crypto'); +} + const fixtures = require('../common/fixtures'); +const crypto = require('crypto'); -if (!common.hasFipsCrypto) +if (!crypto.getFips()) { common.skip('node compiled without FIPS OpenSSL.'); +} const assert = require('assert'); -const crypto = require('crypto'); const input = 'hello'; diff --git a/test/parallel/test-https-agent-session-eviction.js b/test/parallel/test-https-agent-session-eviction.js index e0986e53c1103b..6f88e81e9ff29d 100644 --- a/test/parallel/test-https-agent-session-eviction.js +++ b/test/parallel/test-https-agent-session-eviction.js @@ -2,10 +2,13 @@ 'use strict'; const common = require('../common'); -const { readKey } = require('../common/fixtures'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} + +const { readKey } = require('../common/fixtures'); +const { hasOpenSSL } = require('../common/crypto'); const https = require('https'); const { SSL_OP_NO_TICKET } = require('crypto').constants; @@ -56,7 +59,7 @@ function faultyServer(port) { function second(server, session) { const req = https.request({ port: server.address().port, - ciphers: (common.hasOpenSSL(3, 1) ? 'DEFAULT:@SECLEVEL=0' : 'DEFAULT'), + ciphers: (hasOpenSSL(3, 1) ? 
'DEFAULT:@SECLEVEL=0' : 'DEFAULT'), rejectUnauthorized: false }, function(res) { res.resume(); diff --git a/test/parallel/test-https-client-renegotiation-limit.js b/test/parallel/test-https-client-renegotiation-limit.js index 35fcc6bfcc6e43..18a602d738c316 100644 --- a/test/parallel/test-https-client-renegotiation-limit.js +++ b/test/parallel/test-https-client-renegotiation-limit.js @@ -21,11 +21,15 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} + +const { opensslCli } = require('../common/crypto'); -if (!common.opensslCli) +if (!opensslCli) { common.skip('node compiled without OpenSSL CLI.'); +} const assert = require('assert'); const tls = require('tls'); diff --git a/test/parallel/test-https-foafssl.js b/test/parallel/test-https-foafssl.js index d6dde97a41da9c..df375e7d22201e 100644 --- a/test/parallel/test-https-foafssl.js +++ b/test/parallel/test-https-foafssl.js @@ -21,11 +21,15 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} -if (!common.opensslCli) +const { opensslCli } = require('../common/crypto'); + +if (!opensslCli) { common.skip('node compiled without OpenSSL CLI.'); +} const assert = require('assert'); const fixtures = require('../common/fixtures'); @@ -67,7 +71,7 @@ server.listen(0, function() { '-cert', fixtures.path('keys/rsa_cert_foafssl_b.crt'), '-key', fixtures.path('keys/rsa_private_b.pem')]; - const client = spawn(common.opensslCli, args); + const client = spawn(opensslCli, args); client.stdout.on('data', function(data) { console.log('response received'); diff --git a/test/parallel/test-process-env-allowed-flags-are-documented.js b/test/parallel/test-process-env-allowed-flags-are-documented.js index 2a40a821314ff3..070a88bca8c12c 100644 --- a/test/parallel/test-process-env-allowed-flags-are-documented.js +++ b/test/parallel/test-process-env-allowed-flags-are-documented.js @@ -5,6 +5,7 @@ const common = require('../common'); const assert = require('assert'); const fs = require('fs'); const path = require('path'); +const { hasOpenSSL3 } = require('../common/crypto'); const rootDir = path.resolve(__dirname, '..', '..'); const cliMd = path.join(rootDir, 'doc', 'api', 'cli.md'); @@ -43,7 +44,7 @@ for (const line of [...nodeOptionsLines, ...v8OptionsLines]) { } } -if (!common.hasOpenSSL3) { +if (!hasOpenSSL3) { documented.delete('--openssl-legacy-provider'); documented.delete('--openssl-shared-config'); } @@ -55,8 +56,8 @@ const conditionalOpts = [ filter: (opt) => { return [ '--openssl-config', - common.hasOpenSSL3 ? '--openssl-legacy-provider' : '', - common.hasOpenSSL3 ? '--openssl-shared-config' : '', + hasOpenSSL3 ? '--openssl-legacy-provider' : '', + hasOpenSSL3 ? '--openssl-shared-config' : '', '--tls-cipher-list', '--use-bundled-ca', '--use-openssl-ca', diff --git a/test/parallel/test-process-versions.js b/test/parallel/test-process-versions.js index 3b8af4b5b52526..0a2a4014f18d6b 100644 --- a/test/parallel/test-process-versions.js +++ b/test/parallel/test-process-versions.js @@ -85,11 +85,12 @@ assert.match(process.versions.modules, /^\d+$/); assert.match(process.versions.cjs_module_lexer, commonTemplate); if (common.hasCrypto) { + const { hasOpenSSL3 } = require('../common/crypto'); assert.match(process.versions.ncrypto, commonTemplate); if (process.config.variables.node_shared_openssl) { assert.ok(process.versions.openssl); } else { - const versionRegex = common.hasOpenSSL3 ? 
+ const versionRegex = hasOpenSSL3 ? // The following also matches a development version of OpenSSL 3.x which // can be in the format '3.0.0-alpha4-dev'. This can be handy when // building and linking against the main development branch of OpenSSL. diff --git a/test/parallel/test-tls-alert-handling.js b/test/parallel/test-tls-alert-handling.js index b14438bc92d7e6..eec072796063dc 100644 --- a/test/parallel/test-tls-alert-handling.js +++ b/test/parallel/test-tls-alert-handling.js @@ -1,11 +1,19 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} + +const { + hasOpenSSL, + hasOpenSSL3, + opensslCli, +} = require('../common/crypto'); -if (!common.opensslCli) +if (!opensslCli) { common.skip('node compiled without OpenSSL CLI'); +} const assert = require('assert'); const net = require('net'); @@ -33,14 +41,14 @@ let iter = 0; const errorHandler = common.mustCall((err) => { let expectedErrorCode = 'ERR_SSL_WRONG_VERSION_NUMBER'; let expectedErrorReason = 'wrong version number'; - if (common.hasOpenSSL(3, 2)) { + if (hasOpenSSL(3, 2)) { expectedErrorCode = 'ERR_SSL_PACKET_LENGTH_TOO_LONG'; expectedErrorReason = 'packet length too long'; }; assert.strictEqual(err.code, expectedErrorCode); assert.strictEqual(err.library, 'SSL routines'); - if (!common.hasOpenSSL3) assert.strictEqual(err.function, 'ssl3_get_record'); + if (!hasOpenSSL3) assert.strictEqual(err.function, 'ssl3_get_record'); assert.strictEqual(err.reason, expectedErrorReason); errorReceived = true; if (canCloseServer()) @@ -96,13 +104,13 @@ function sendBADTLSRecord() { client.on('error', common.mustCall((err) => { let expectedErrorCode = 'ERR_SSL_TLSV1_ALERT_PROTOCOL_VERSION'; let expectedErrorReason = 'tlsv1 alert protocol version'; - if (common.hasOpenSSL(3, 2)) { + if (hasOpenSSL(3, 2)) { expectedErrorCode = 'ERR_SSL_TLSV1_ALERT_RECORD_OVERFLOW'; expectedErrorReason = 'tlsv1 alert record overflow'; } assert.strictEqual(err.code, expectedErrorCode); assert.strictEqual(err.library, 'SSL routines'); - if (!common.hasOpenSSL3) + if (!hasOpenSSL3) assert.strictEqual(err.function, 'ssl3_read_bytes'); assert.strictEqual(err.reason, expectedErrorReason); })); diff --git a/test/parallel/test-tls-alert.js b/test/parallel/test-tls-alert.js index e6aaaedfe59d72..23c92e7293458f 100644 --- a/test/parallel/test-tls-alert.js +++ b/test/parallel/test-tls-alert.js @@ -21,11 +21,18 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} + +const { + hasOpenSSL, + opensslCli, +} = require('../common/crypto'); -if (!common.opensslCli) +if (!opensslCli) { common.skip('node compiled without OpenSSL CLI.'); +} const assert = require('assert'); const { execFile } = require('child_process'); @@ -42,10 +49,10 @@ const server = tls.Server({ cert: loadPEM('agent2-cert') }, null).listen(0, common.mustCall(() => { const args = ['s_client', '-quiet', '-tls1_1', - '-cipher', (common.hasOpenSSL(3, 1) ? 'DEFAULT:@SECLEVEL=0' : 'DEFAULT'), + '-cipher', (hasOpenSSL(3, 1) ? 
'DEFAULT:@SECLEVEL=0' : 'DEFAULT'), '-connect', `127.0.0.1:${server.address().port}`]; - execFile(common.opensslCli, args, common.mustCall((err, _, stderr) => { + execFile(opensslCli, args, common.mustCall((err, _, stderr) => { assert.strictEqual(err.code, 1); assert.match(stderr, /SSL alert number 70/); server.close(); diff --git a/test/parallel/test-tls-alpn-server-client.js b/test/parallel/test-tls-alpn-server-client.js index 8f1a4b8e439aab..b7cd2806471e67 100644 --- a/test/parallel/test-tls-alpn-server-client.js +++ b/test/parallel/test-tls-alpn-server-client.js @@ -1,8 +1,9 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} const assert = require('assert'); const { spawn } = require('child_process'); @@ -198,7 +199,7 @@ function TestFatalAlert() { // OpenSSL's s_client should output the TLS alert number, which is 120 // for the 'no_application_protocol' alert. - const { opensslCli } = common; + const { opensslCli } = require('../common/crypto'); if (opensslCli) { const addr = `${serverIP}:${port}`; let stderr = ''; diff --git a/test/parallel/test-tls-cert-ext-encoding.js b/test/parallel/test-tls-cert-ext-encoding.js index 4556b5791851c5..154e0cdcf02294 100644 --- a/test/parallel/test-tls-cert-ext-encoding.js +++ b/test/parallel/test-tls-cert-ext-encoding.js @@ -3,7 +3,9 @@ const common = require('../common'); if (!common.hasCrypto) common.skip('missing crypto'); -if (common.hasOpenSSL3) +const { hasOpenSSL3 } = require('../common/crypto'); + +if (hasOpenSSL3) // TODO(danbev) This test fails with the following error: // error:0D00008F:asn1 encoding routines::no matching choice type // diff --git a/test/parallel/test-tls-client-auth.js b/test/parallel/test-tls-client-auth.js index de4c8f038ec073..b347c0a88df571 100644 --- a/test/parallel/test-tls-client-auth.js +++ b/test/parallel/test-tls-client-auth.js @@ -3,6 +3,11 @@ const common = require('../common'); const fixtures = require('../common/fixtures'); +if (!common.hasCrypto) { + common.skip('missing crypto'); +} +const { hasOpenSSL } = require('../common/crypto'); + const { assert, connect, keys, tls } = require(fixtures.path('tls-connect')); @@ -79,7 +84,7 @@ connect({ }, function(err, pair, cleanup) { assert.strictEqual(pair.server.err.code, 'ERR_SSL_PEER_DID_NOT_RETURN_A_CERTIFICATE'); - const expectedErr = common.hasOpenSSL(3, 2) ? + const expectedErr = hasOpenSSL(3, 2) ? 
'ERR_SSL_SSL/TLS_ALERT_HANDSHAKE_FAILURE' : 'ERR_SSL_SSLV3_ALERT_HANDSHAKE_FAILURE'; assert.strictEqual(pair.client.err.code, expectedErr); diff --git a/test/parallel/test-tls-client-getephemeralkeyinfo.js b/test/parallel/test-tls-client-getephemeralkeyinfo.js index 0bacd8702fc650..0f132c565e4400 100644 --- a/test/parallel/test-tls-client-getephemeralkeyinfo.js +++ b/test/parallel/test-tls-client-getephemeralkeyinfo.js @@ -3,6 +3,7 @@ const common = require('../common'); if (!common.hasCrypto) common.skip('missing crypto'); const fixtures = require('../common/fixtures'); +const { hasOpenSSL } = require('../common/crypto'); const assert = require('assert'); const { X509Certificate } = require('crypto'); @@ -69,7 +70,7 @@ function test(size, type, name, cipher) { test(undefined, undefined, undefined, 'AES256-SHA256'); test('auto', 'DH', undefined, 'DHE-RSA-AES256-GCM-SHA384'); -if (!common.hasOpenSSL(3, 2)) { +if (!hasOpenSSL(3, 2)) { test(1024, 'DH', undefined, 'DHE-RSA-AES256-GCM-SHA384'); } else { test(3072, 'DH', undefined, 'DHE-RSA-AES256-GCM-SHA384'); diff --git a/test/parallel/test-tls-client-mindhsize.js b/test/parallel/test-tls-client-mindhsize.js index 15c086842e1e4a..1ab5b5fe1bffd7 100644 --- a/test/parallel/test-tls-client-mindhsize.js +++ b/test/parallel/test-tls-client-mindhsize.js @@ -3,6 +3,7 @@ const common = require('../common'); if (!common.hasCrypto) common.skip('missing crypto'); +const { hasOpenSSL } = require('../common/crypto'); const assert = require('assert'); const tls = require('tls'); const fixtures = require('../common/fixtures'); @@ -38,7 +39,7 @@ function test(size, err, next) { // Client set minimum DH parameter size to 2048 or 3072 bits // so that it fails when it makes a connection to the tls // server where is too small - const minDHSize = common.hasOpenSSL(3, 2) ? 3072 : 2048; + const minDHSize = hasOpenSSL(3, 2) ? 3072 : 2048; const client = tls.connect({ minDHSize: minDHSize, port: this.address().port, @@ -76,7 +77,7 @@ function testDHE3072() { test(3072, false, null); } -if (common.hasOpenSSL(3, 2)) { +if (hasOpenSSL(3, 2)) { // Minimum size for OpenSSL 3.2 is 2048 by default testDHE2048(true, testDHE3072); } else { diff --git a/test/parallel/test-tls-client-renegotiation-13.js b/test/parallel/test-tls-client-renegotiation-13.js index a32baed0249a0a..38a72fb525b430 100644 --- a/test/parallel/test-tls-client-renegotiation-13.js +++ b/test/parallel/test-tls-client-renegotiation-13.js @@ -1,6 +1,12 @@ 'use strict'; const common = require('../common'); + +if (!common.hasCrypto) { + common.skip('missing crypto'); +} +const { hasOpenSSL3 } = require('../common/crypto'); + const fixtures = require('../common/fixtures'); // Confirm that for TLSv1.3, renegotiate() is disallowed. @@ -29,7 +35,7 @@ connect({ const ok = client.renegotiate({}, common.mustCall((err) => { assert.throws(() => { throw err; }, { - message: common.hasOpenSSL3 ? + message: hasOpenSSL3 ? 
'error:0A00010A:SSL routines::wrong ssl version' : 'error:1420410A:SSL routines:SSL_renegotiate:wrong ssl version', code: 'ERR_SSL_WRONG_SSL_VERSION', diff --git a/test/parallel/test-tls-client-renegotiation-limit.js b/test/parallel/test-tls-client-renegotiation-limit.js index 71d7a85bae468b..b35140e8964ac1 100644 --- a/test/parallel/test-tls-client-renegotiation-limit.js +++ b/test/parallel/test-tls-client-renegotiation-limit.js @@ -21,11 +21,15 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} + +const { opensslCli } = require('../common/crypto'); -if (!common.opensslCli) +if (!opensslCli) { common.skip('node compiled without OpenSSL CLI.'); +} const assert = require('assert'); const tls = require('tls'); diff --git a/test/parallel/test-tls-dhe.js b/test/parallel/test-tls-dhe.js index 21739ce42428eb..25b58191e1d413 100644 --- a/test/parallel/test-tls-dhe.js +++ b/test/parallel/test-tls-dhe.js @@ -22,11 +22,18 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} + +const { + hasOpenSSL, + opensslCli, +} = require('../common/crypto'); -if (!common.opensslCli) +if (!opensslCli) { common.skip('missing openssl-cli'); +} const assert = require('assert'); const { X509Certificate } = require('crypto'); @@ -43,7 +50,7 @@ const dheCipher = 'DHE-RSA-AES128-SHA256'; const ecdheCipher = 'ECDHE-RSA-AES128-SHA256'; const ciphers = `${dheCipher}:${ecdheCipher}`; -if (!common.hasOpenSSL(3, 2)) { +if (!hasOpenSSL(3, 2)) { // Test will emit a warning because the DH parameter size is < 2048 bits // when the test is run on versions lower than OpenSSL32 common.expectWarning('SecurityWarning', @@ -70,7 +77,7 @@ function test(dhparam, keylen, expectedCipher) { const args = ['s_client', '-connect', `127.0.0.1:${server.address().port}`, '-cipher', `${ciphers}:@SECLEVEL=1`]; - execFile(common.opensslCli, args, common.mustSucceed((stdout) => { + execFile(opensslCli, args, common.mustSucceed((stdout) => { assert(keylen === null || stdout.includes(`Server Temp Key: DH, ${keylen} bits`)); assert(stdout.includes(`Cipher : ${expectedCipher}`)); @@ -107,7 +114,7 @@ function testCustomParam(keylen, expectedCipher) { }, /DH parameter is less than 1024 bits/); // Custom DHE parameters are supported (but discouraged). - if (!common.hasOpenSSL(3, 2)) { + if (!hasOpenSSL(3, 2)) { await testCustomParam(1024, dheCipher); } else { await testCustomParam(3072, dheCipher); diff --git a/test/parallel/test-tls-ecdh-auto.js b/test/parallel/test-tls-ecdh-auto.js index 11c588d8ac8ce1..adc7817b729aa8 100644 --- a/test/parallel/test-tls-ecdh-auto.js +++ b/test/parallel/test-tls-ecdh-auto.js @@ -4,11 +4,15 @@ const common = require('../common'); // This test ensures that the value "auto" on ecdhCurve option is // supported to enable automatic curve selection in TLS server. 
-if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} + +const { opensslCli } = require('../common/crypto'); -if (!common.opensslCli) +if (!opensslCli) { common.skip('missing openssl-cli'); +} const assert = require('assert'); const tls = require('tls'); @@ -36,7 +40,7 @@ const server = tls.createServer(options, (conn) => { '-cipher', `${options.ciphers}`, '-connect', `127.0.0.1:${server.address().port}`]; - execFile(common.opensslCli, args, common.mustSucceed((stdout) => { + execFile(opensslCli, args, common.mustSucceed((stdout) => { assert(stdout.includes(reply)); server.close(); })); diff --git a/test/parallel/test-tls-ecdh-multiple.js b/test/parallel/test-tls-ecdh-multiple.js index 5bf119f48bacad..957f8e0407a6de 100644 --- a/test/parallel/test-tls-ecdh-multiple.js +++ b/test/parallel/test-tls-ecdh-multiple.js @@ -4,11 +4,16 @@ const common = require('../common'); // This test ensures that ecdhCurve option of TLS server supports colon // separated ECDH curve names as value. -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} + +const { opensslCli } = require('../common/crypto'); +const crypto = require('crypto'); -if (!common.opensslCli) +if (!opensslCli) { common.skip('missing openssl-cli'); +} const assert = require('assert'); const tls = require('tls'); @@ -36,7 +41,7 @@ const server = tls.createServer(options, (conn) => { '-cipher', `${options.ciphers}`, '-connect', `127.0.0.1:${server.address().port}`]; - execFile(common.opensslCli, args, common.mustSucceed((stdout) => { + execFile(opensslCli, args, common.mustSucceed((stdout) => { assert(stdout.includes(reply)); server.close(); })); @@ -51,8 +56,9 @@ const server = tls.createServer(options, (conn) => { ]; // Brainpool is not supported in FIPS mode. 
- if (common.hasFipsCrypto) + if (crypto.getFips()) { unsupportedCurves.push('brainpoolP256r1'); + } unsupportedCurves.forEach((ecdhCurve) => { assert.throws(() => tls.createServer({ ecdhCurve }), diff --git a/test/parallel/test-tls-ecdh.js b/test/parallel/test-tls-ecdh.js index 276b713f5ecf70..4d45e7f024586e 100644 --- a/test/parallel/test-tls-ecdh.js +++ b/test/parallel/test-tls-ecdh.js @@ -23,11 +23,15 @@ const common = require('../common'); const fixtures = require('../common/fixtures'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} -if (!common.opensslCli) +const { opensslCli } = require('../common/crypto'); + +if (!opensslCli) { common.skip('missing openssl-cli'); +} const assert = require('assert'); const tls = require('tls'); @@ -49,7 +53,7 @@ const server = tls.createServer(options, common.mustCall(function(conn) { })); server.listen(0, '127.0.0.1', common.mustCall(function() { - const cmd = common.escapePOSIXShell`"${common.opensslCli}" s_client -cipher ${ + const cmd = common.escapePOSIXShell`"${opensslCli}" s_client -cipher ${ options.ciphers} -connect 127.0.0.1:${this.address().port}`; exec(...cmd, common.mustSucceed((stdout, stderr) => { diff --git a/test/parallel/test-tls-empty-sni-context.js b/test/parallel/test-tls-empty-sni-context.js index 093e5cca712d2c..79f1ddd341d938 100644 --- a/test/parallel/test-tls-empty-sni-context.js +++ b/test/parallel/test-tls-empty-sni-context.js @@ -3,7 +3,7 @@ const common = require('../common'); if (!common.hasCrypto) common.skip('missing crypto'); - +const { hasOpenSSL } = require('../common/crypto'); const assert = require('assert'); const tls = require('tls'); @@ -26,7 +26,7 @@ const server = tls.createServer(options, (c) => { }, common.mustNotCall()); c.on('error', common.mustCall((err) => { - const expectedErr = common.hasOpenSSL(3, 2) ? + const expectedErr = hasOpenSSL(3, 2) ? 'ERR_SSL_SSL/TLS_ALERT_HANDSHAKE_FAILURE' : 'ERR_SSL_SSLV3_ALERT_HANDSHAKE_FAILURE'; assert.strictEqual(err.code, expectedErr); })); diff --git a/test/parallel/test-tls-getprotocol.js b/test/parallel/test-tls-getprotocol.js index a9c8775e2f112f..b1eab88fd6517e 100644 --- a/test/parallel/test-tls-getprotocol.js +++ b/test/parallel/test-tls-getprotocol.js @@ -3,6 +3,8 @@ const common = require('../common'); if (!common.hasCrypto) common.skip('missing crypto'); +const { hasOpenSSL } = require('../common/crypto'); + // This test ensures that `getProtocol` returns the right protocol // from a TLS connection @@ -14,11 +16,11 @@ const clientConfigs = [ { secureProtocol: 'TLSv1_method', version: 'TLSv1', - ciphers: (common.hasOpenSSL(3, 1) ? 'DEFAULT:@SECLEVEL=0' : 'DEFAULT') + ciphers: (hasOpenSSL(3, 1) ? 'DEFAULT:@SECLEVEL=0' : 'DEFAULT') }, { secureProtocol: 'TLSv1_1_method', version: 'TLSv1.1', - ciphers: (common.hasOpenSSL(3, 1) ? 'DEFAULT:@SECLEVEL=0' : 'DEFAULT') + ciphers: (hasOpenSSL(3, 1) ? 
'DEFAULT:@SECLEVEL=0' : 'DEFAULT') }, { secureProtocol: 'TLSv1_2_method', version: 'TLSv1.2' diff --git a/test/parallel/test-tls-junk-server.js b/test/parallel/test-tls-junk-server.js index cc520383ede45f..0e536a66884e94 100644 --- a/test/parallel/test-tls-junk-server.js +++ b/test/parallel/test-tls-junk-server.js @@ -1,8 +1,11 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} + +const { hasOpenSSL } = require('../common/crypto'); const assert = require('assert'); const https = require('https'); @@ -21,7 +24,7 @@ server.listen(0, function() { req.end(); let expectedErrorMessage = new RegExp('wrong version number'); - if (common.hasOpenSSL(3, 2)) { + if (hasOpenSSL(3, 2)) { expectedErrorMessage = new RegExp('packet length too long'); }; req.once('error', common.mustCall(function(err) { diff --git a/test/parallel/test-tls-key-mismatch.js b/test/parallel/test-tls-key-mismatch.js index fdbb3676267a9d..df8848a03de4a9 100644 --- a/test/parallel/test-tls-key-mismatch.js +++ b/test/parallel/test-tls-key-mismatch.js @@ -22,14 +22,16 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} const fixtures = require('../common/fixtures'); +const { hasOpenSSL3 } = require('../common/crypto'); const assert = require('assert'); const tls = require('tls'); -const errorMessageRegex = common.hasOpenSSL3 ? +const errorMessageRegex = hasOpenSSL3 ? /^Error: error:05800074:x509 certificate routines::key values mismatch$/ : /^Error: error:0B080074:x509 certificate routines:X509_check_private_key:key values mismatch$/; diff --git a/test/parallel/test-tls-legacy-pfx.js b/test/parallel/test-tls-legacy-pfx.js index 33b4c58fc6ccc3..5106217718dbdc 100644 --- a/test/parallel/test-tls-legacy-pfx.js +++ b/test/parallel/test-tls-legacy-pfx.js @@ -1,9 +1,14 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); -if (!common.hasOpenSSL3) +} + +const { hasOpenSSL3 } = require('../common/crypto'); + +if (!hasOpenSSL3) { common.skip('OpenSSL legacy failures are only testable with OpenSSL 3+'); +} const fixtures = require('../common/fixtures'); diff --git a/test/parallel/test-tls-min-max-version.js b/test/parallel/test-tls-min-max-version.js index af32468eea6a68..4903d92f5c5700 100644 --- a/test/parallel/test-tls-min-max-version.js +++ b/test/parallel/test-tls-min-max-version.js @@ -1,5 +1,13 @@ 'use strict'; const common = require('../common'); + +if (!common.hasCrypto) { + common.skip('missing crypto'); +} +const { + hasOpenSSL, + hasOpenSSL3, +} = require('../common/crypto'); const fixtures = require('../common/fixtures'); const { inspect } = require('util'); @@ -16,13 +24,13 @@ function test(cmin, cmax, cprot, smin, smax, sprot, proto, cerr, serr) { assert(proto || cerr || serr, 'test missing any expectations'); let ciphers; - if (common.hasOpenSSL3 && (proto === 'TLSv1' || proto === 'TLSv1.1' || + if (hasOpenSSL3 && (proto === 'TLSv1' || proto === 'TLSv1.1' || proto === 'TLSv1_1_method' || proto === 'TLSv1_method' || sprot === 'TLSv1_1_method' || sprot === 'TLSv1_method')) { if (serr !== 'ERR_SSL_UNSUPPORTED_PROTOCOL') ciphers = 'ALL@SECLEVEL=0'; } - if (common.hasOpenSSL(3, 1) && cerr === 'ERR_SSL_TLSV1_ALERT_PROTOCOL_VERSION') { + if (hasOpenSSL(3, 1) && cerr === 'ERR_SSL_TLSV1_ALERT_PROTOCOL_VERSION') { ciphers = 'DEFAULT@SECLEVEL=0'; } // Report where test was called from. 
Strip leading garbage from @@ -125,9 +133,9 @@ test(U, U, 'TLS_method', U, U, 'TLSv1_method', 'TLSv1'); // OpenSSL 1.1.1 and 3.0 use a different error code and alert (sent to the // client) when no protocols are enabled on the server. -const NO_PROTOCOLS_AVAILABLE_SERVER = common.hasOpenSSL3 ? +const NO_PROTOCOLS_AVAILABLE_SERVER = hasOpenSSL3 ? 'ERR_SSL_NO_PROTOCOLS_AVAILABLE' : 'ERR_SSL_INTERNAL_ERROR'; -const NO_PROTOCOLS_AVAILABLE_SERVER_ALERT = common.hasOpenSSL3 ? +const NO_PROTOCOLS_AVAILABLE_SERVER_ALERT = hasOpenSSL3 ? 'ERR_SSL_TLSV1_ALERT_PROTOCOL_VERSION' : 'ERR_SSL_TLSV1_ALERT_INTERNAL_ERROR'; // SSLv23 also means "any supported protocol" greater than the default diff --git a/test/parallel/test-tls-no-sslv3.js b/test/parallel/test-tls-no-sslv3.js index 9282beb4bdac2c..cd5f4ad944a6c5 100644 --- a/test/parallel/test-tls-no-sslv3.js +++ b/test/parallel/test-tls-no-sslv3.js @@ -1,10 +1,14 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} -if (common.opensslCli === false) +const { opensslCli } = require('../common/crypto'); + +if (opensslCli === false) { common.skip('node compiled without OpenSSL CLI.'); +} const assert = require('assert'); const tls = require('tls'); @@ -23,7 +27,7 @@ server.listen(0, '127.0.0.1', function() { '-ssl3', '-connect', address]; - const client = spawn(common.opensslCli, args, { stdio: 'pipe' }); + const client = spawn(opensslCli, args, { stdio: 'pipe' }); client.stdout.pipe(process.stdout); client.stderr.pipe(process.stderr); client.stderr.setEncoding('utf8'); diff --git a/test/parallel/test-tls-ocsp-callback.js b/test/parallel/test-tls-ocsp-callback.js index 04a60a0890c506..bdf622d4686ec1 100644 --- a/test/parallel/test-tls-ocsp-callback.js +++ b/test/parallel/test-tls-ocsp-callback.js @@ -22,12 +22,17 @@ 'use strict'; const common = require('../common'); -if (!common.opensslCli) - common.skip('node compiled without OpenSSL CLI.'); - -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} + +const { opensslCli } = require('../common/crypto'); + +if (!opensslCli) { + common.skip('node compiled without OpenSSL CLI.'); +} +const crypto = require('crypto'); const tls = require('tls'); const fixtures = require('../common/fixtures'); @@ -108,6 +113,6 @@ test({ ocsp: true, response: false }); test({ ocsp: true, response: 'hello world' }); test({ ocsp: false }); -if (!common.hasFipsCrypto) { +if (!crypto.getFips()) { test({ ocsp: true, response: 'hello pfx', pfx: pfx, passphrase: 'sample' }); } diff --git a/test/parallel/test-tls-psk-circuit.js b/test/parallel/test-tls-psk-circuit.js index c06e61c321ef67..61861ecf4dafa6 100644 --- a/test/parallel/test-tls-psk-circuit.js +++ b/test/parallel/test-tls-psk-circuit.js @@ -1,9 +1,11 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} +const { hasOpenSSL } = require('../common/crypto'); const assert = require('assert'); const tls = require('tls'); @@ -62,12 +64,12 @@ test({ psk: USERS.UserA, identity: 'UserA' }, { minVersion: 'TLSv1.3' }); test({ psk: USERS.UserB, identity: 'UserB' }); test({ psk: USERS.UserB, identity: 'UserB' }, { minVersion: 'TLSv1.3' }); // Unrecognized user should fail handshake -const expectedHandshakeErr = common.hasOpenSSL(3, 2) ? +const expectedHandshakeErr = hasOpenSSL(3, 2) ? 
'ERR_SSL_SSL/TLS_ALERT_HANDSHAKE_FAILURE' : 'ERR_SSL_SSLV3_ALERT_HANDSHAKE_FAILURE'; test({ psk: USERS.UserB, identity: 'UserC' }, {}, expectedHandshakeErr); // Recognized user but incorrect secret should fail handshake -const expectedIllegalParameterErr = common.hasOpenSSL(3, 4) ? 'ERR_SSL_TLSV1_ALERT_DECRYPT_ERROR' : - common.hasOpenSSL(3, 2) ? +const expectedIllegalParameterErr = hasOpenSSL(3, 4) ? 'ERR_SSL_TLSV1_ALERT_DECRYPT_ERROR' : + hasOpenSSL(3, 2) ? 'ERR_SSL_SSL/TLS_ALERT_ILLEGAL_PARAMETER' : 'ERR_SSL_SSLV3_ALERT_ILLEGAL_PARAMETER'; test({ psk: USERS.UserA, identity: 'UserB' }, {}, expectedIllegalParameterErr); test({ psk: USERS.UserB, identity: 'UserB' }); diff --git a/test/parallel/test-tls-psk-server.js b/test/parallel/test-tls-psk-server.js index b9260958401522..87fad86083e1ab 100644 --- a/test/parallel/test-tls-psk-server.js +++ b/test/parallel/test-tls-psk-server.js @@ -1,10 +1,15 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); -if (!common.opensslCli) +} + +const { opensslCli } = require('../common/crypto'); + +if (!opensslCli) { common.skip('missing openssl cli'); +} const assert = require('assert'); @@ -41,7 +46,7 @@ let sentWorld = false; let gotWorld = false; server.listen(0, () => { - const client = spawn(common.opensslCli, [ + const client = spawn(opensslCli, [ 's_client', '-connect', `127.0.0.1:${server.address().port}`, '-cipher', CIPHERS, diff --git a/test/parallel/test-tls-securepair-server.js b/test/parallel/test-tls-securepair-server.js index 78cd9f725401ed..fb4ebe6a2511cf 100644 --- a/test/parallel/test-tls-securepair-server.js +++ b/test/parallel/test-tls-securepair-server.js @@ -21,11 +21,15 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} + +const { opensslCli } = require('../common/crypto'); -if (!common.opensslCli) +if (!opensslCli) { common.skip('missing openssl-cli'); +} const assert = require('assert'); const tls = require('tls'); @@ -109,7 +113,7 @@ server.listen(0, common.mustCall(function() { const args = ['s_client', '-connect', `127.0.0.1:${this.address().port}`]; - const client = spawn(common.opensslCli, args); + const client = spawn(opensslCli, args); let out = ''; diff --git a/test/parallel/test-tls-server-verify.js b/test/parallel/test-tls-server-verify.js index 51ccd0d747fdf5..2517c7c8dbbb1f 100644 --- a/test/parallel/test-tls-server-verify.js +++ b/test/parallel/test-tls-server-verify.js @@ -22,11 +22,15 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} + +const { opensslCli } = require('../common/crypto'); -if (!common.opensslCli) +if (!opensslCli) { common.skip('node compiled without OpenSSL CLI.'); +} // This is a rather complex test which sets up various TLS servers with node // and connects to them using the 'openssl s_client' command line utility @@ -188,7 +192,7 @@ function runClient(prefix, port, options, cb) { } // To test use: openssl s_client -connect localhost:8000 - const client = spawn(common.opensslCli, args); + const client = spawn(opensslCli, args); let out = ''; diff --git a/test/parallel/test-tls-session-cache.js b/test/parallel/test-tls-session-cache.js index b55e150401d8a2..9524764aa609ee 100644 --- a/test/parallel/test-tls-session-cache.js +++ b/test/parallel/test-tls-session-cache.js @@ -21,17 +21,23 @@ 'use strict'; const common = require('../common'); -if 
(!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} +const { + hasOpenSSL, + opensslCli, +} = require('../common/crypto'); + +if (!opensslCli) { + common.skip('node compiled without OpenSSL CLI.'); +} + const fixtures = require('../common/fixtures'); const assert = require('assert'); const tls = require('tls'); const { spawn } = require('child_process'); -if (!common.opensslCli) - common.skip('node compiled without OpenSSL CLI.'); - - doTest({ tickets: false }, function() { doTest({ tickets: true }, function() { doTest({ tickets: false, invalidSession: true }, function() { @@ -100,7 +106,7 @@ function doTest(testOptions, callback) { const args = [ 's_client', '-tls1', - '-cipher', (common.hasOpenSSL(3, 1) ? 'DEFAULT:@SECLEVEL=0' : 'DEFAULT'), + '-cipher', (hasOpenSSL(3, 1) ? 'DEFAULT:@SECLEVEL=0' : 'DEFAULT'), '-connect', `localhost:${this.address().port}`, '-servername', 'ohgod', '-key', fixtures.path('keys/rsa_private.pem'), @@ -109,7 +115,7 @@ function doTest(testOptions, callback) { ].concat(testOptions.tickets ? [] : '-no_ticket'); function spawnClient() { - const client = spawn(common.opensslCli, args, { + const client = spawn(opensslCli, args, { stdio: [ 0, 1, 'pipe' ] }); let err = ''; diff --git a/test/parallel/test-tls-set-ciphers.js b/test/parallel/test-tls-set-ciphers.js index f7062e73c9403c..1e63e9376e134b 100644 --- a/test/parallel/test-tls-set-ciphers.js +++ b/test/parallel/test-tls-set-ciphers.js @@ -1,7 +1,17 @@ 'use strict'; const common = require('../common'); -if (!common.hasOpenSSL3) +if (!common.hasCrypto) { common.skip('missing crypto, or OpenSSL version lower than 3'); +} + +const { + hasOpenSSL, + hasOpenSSL3, +} = require('../common/crypto'); + +if (!hasOpenSSL3) { + common.skip('missing crypto, or OpenSSL version lower than 3'); +} const fixtures = require('../common/fixtures'); const { inspect } = require('util'); @@ -80,7 +90,7 @@ function test(cciphers, sciphers, cipher, cerr, serr, options) { const U = undefined; let expectedTLSAlertError = 'ERR_SSL_SSLV3_ALERT_HANDSHAKE_FAILURE'; -if (common.hasOpenSSL(3, 2)) { +if (hasOpenSSL(3, 2)) { expectedTLSAlertError = 'ERR_SSL_SSL/TLS_ALERT_HANDSHAKE_FAILURE'; } @@ -117,7 +127,7 @@ test(U, 'AES256-SHA', 'TLS_AES_256_GCM_SHA384', U, U, { maxVersion: 'TLSv1.3' }) // default, but work. // However, for OpenSSL32 AES_128 is not enabled due to the // default security level -if (!common.hasOpenSSL(3, 2)) { +if (!hasOpenSSL(3, 2)) { test('TLS_AES_128_CCM_8_SHA256', U, U, 'ERR_SSL_SSLV3_ALERT_HANDSHAKE_FAILURE', 'ERR_SSL_NO_SHARED_CIPHER'); diff --git a/test/parallel/test-tls-set-secure-context.js b/test/parallel/test-tls-set-secure-context.js index c056875e14ddfb..3d2de6b3321414 100644 --- a/test/parallel/test-tls-set-secure-context.js +++ b/test/parallel/test-tls-set-secure-context.js @@ -1,8 +1,9 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} // This test verifies the behavior of the tls setSecureContext() method. 
// It also verifies that existing connections are not disrupted when the @@ -12,6 +13,7 @@ const assert = require('assert'); const events = require('events'); const https = require('https'); const timers = require('timers/promises'); +const { hasOpenSSL3 } = require('../common/crypto'); const fixtures = require('../common/fixtures'); const credentialOptions = [ { @@ -55,7 +57,7 @@ server.listen(0, common.mustCall(() => { server.setSecureContext(credentialOptions[1]); firstResponse.write('request-'); - const errorMessageRegex = common.hasOpenSSL3 ? + const errorMessageRegex = hasOpenSSL3 ? /^Error: self-signed certificate$/ : /^Error: self signed certificate$/; await assert.rejects(makeRequest(port, 3), errorMessageRegex); diff --git a/test/parallel/test-tls-set-sigalgs.js b/test/parallel/test-tls-set-sigalgs.js index 3f3d152f4d877e..985ca13ba2ac7d 100644 --- a/test/parallel/test-tls-set-sigalgs.js +++ b/test/parallel/test-tls-set-sigalgs.js @@ -1,6 +1,9 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) common.skip('missing crypto'); +if (!common.hasCrypto) { + common.skip('missing crypto'); +} +const { hasOpenSSL } = require('../common/crypto'); const fixtures = require('../common/fixtures'); // Test sigalgs: option for TLS. @@ -63,7 +66,7 @@ test('RSA-PSS+SHA256:RSA-PSS+SHA512:ECDSA+SHA256', ['RSA-PSS+SHA256', 'ECDSA+SHA256']); // Do not have shared sigalgs. -const handshakeErr = common.hasOpenSSL(3, 2) ? +const handshakeErr = hasOpenSSL(3, 2) ? 'ERR_SSL_SSL/TLS_ALERT_HANDSHAKE_FAILURE' : 'ERR_SSL_SSLV3_ALERT_HANDSHAKE_FAILURE'; test('RSA-PSS+SHA384', 'ECDSA+SHA256', undefined, handshakeErr, diff --git a/test/parallel/test-trace-env.js b/test/parallel/test-trace-env.js index ba08e0af2aad1d..7a7b80fa4c1094 100644 --- a/test/parallel/test-trace-env.js +++ b/test/parallel/test-trace-env.js @@ -18,9 +18,11 @@ spawnSyncAndAssert(process.execPath, ['--trace-env', fixtures.path('empty.js')], } if (common.hasCrypto) { assert.match(output, /get "NODE_EXTRA_CA_CERTS"/); - } - if (common.hasOpenSSL3) { - assert.match(output, /get "OPENSSL_CONF"/); + + const { hasOpenSSL3 } = require('../common/crypto'); + if (hasOpenSSL3) { + assert.match(output, /get "OPENSSL_CONF"/); + } } assert.match(output, /get "NODE_DEBUG_NATIVE"/); assert.match(output, /get "NODE_COMPILE_CACHE"/); diff --git a/test/parallel/test-x509-escaping.js b/test/parallel/test-x509-escaping.js index e6ae4d886908cb..b507af88e1f7f3 100644 --- a/test/parallel/test-x509-escaping.js +++ b/test/parallel/test-x509-escaping.js @@ -1,15 +1,16 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} const assert = require('assert'); const { X509Certificate } = require('crypto'); const tls = require('tls'); const fixtures = require('../common/fixtures'); -const { hasOpenSSL3 } = common; +const { hasOpenSSL3 } = require('../common/crypto'); // Test that all certificate chains provided by the reporter are rejected. 
{ diff --git a/test/pummel/test-crypto-dh-hash.js b/test/pummel/test-crypto-dh-hash.js index ef5a640688c9bb..b59f556a2042b9 100644 --- a/test/pummel/test-crypto-dh-hash.js +++ b/test/pummel/test-crypto-dh-hash.js @@ -30,7 +30,9 @@ if (common.isPi) { common.skip('Too slow for Raspberry Pi devices'); } -if (!common.hasOpenSSL3) { +const { hasOpenSSL3 } = require('../common/crypto'); + +if (!hasOpenSSL3) { common.skip('Too slow when dynamically linked against OpenSSL 1.1.1'); } diff --git a/test/pummel/test-crypto-dh-keys.js b/test/pummel/test-crypto-dh-keys.js index 2caa4e244a9859..abce6a07acf4ac 100644 --- a/test/pummel/test-crypto-dh-keys.js +++ b/test/pummel/test-crypto-dh-keys.js @@ -36,8 +36,9 @@ const crypto = require('crypto'); [ 'modp1', 'modp2', 'modp5', 'modp14', 'modp15', 'modp16', 'modp17' ] .forEach((name) => { // modp1 is 768 bits, FIPS requires >= 1024 - if (name === 'modp1' && common.hasFipsCrypto) + if (name === 'modp1' && crypto.getFips()) { return; + } const group1 = crypto.getDiffieHellman(name); const group2 = crypto.getDiffieHellman(name); group1.generateKeys(); diff --git a/test/pummel/test-dh-regr.js b/test/pummel/test-dh-regr.js index 41d5bf872f97ec..cfae57d0728bdb 100644 --- a/test/pummel/test-dh-regr.js +++ b/test/pummel/test-dh-regr.js @@ -32,10 +32,11 @@ if (common.isPi) { const assert = require('assert'); const crypto = require('crypto'); +const { hasOpenSSL3 } = require('../common/crypto'); // FIPS requires length >= 1024 but we use 512/256 in this test to keep it from // taking too long and timing out in CI. -const length = (common.hasFipsCrypto) ? 1024 : common.hasOpenSSL3 ? 512 : 256; +const length = crypto.getFips() ? 1024 : hasOpenSSL3 ? 512 : 256; const p = crypto.createDiffieHellman(length).getPrime(); diff --git a/test/sequential/test-tls-psk-client.js b/test/sequential/test-tls-psk-client.js index ddebc8f8cc9807..c07b1f92d98376 100644 --- a/test/sequential/test-tls-psk-client.js +++ b/test/sequential/test-tls-psk-client.js @@ -1,10 +1,15 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); -if (!common.opensslCli) +} + +const { opensslCli } = require('../common/crypto'); + +if (!opensslCli) { common.skip('missing openssl cli'); +} const assert = require('assert'); const tls = require('tls'); @@ -16,7 +21,7 @@ const KEY = 'd731ef57be09e5204f0b205b60627028'; const IDENTITY = 'Client_identity'; // Hardcoded by `openssl s_server` const useIPv4 = !common.hasIPv6; -const server = spawn(common.opensslCli, [ +const server = spawn(opensslCli, [ 's_server', '-accept', common.PORT, '-cipher', CIPHERS, diff --git a/test/sequential/test-tls-securepair-client.js b/test/sequential/test-tls-securepair-client.js index f3ca42ad6edfb0..262518621b5f3f 100644 --- a/test/sequential/test-tls-securepair-client.js +++ b/test/sequential/test-tls-securepair-client.js @@ -23,14 +23,19 @@ const common = require('../common'); -if (!common.opensslCli) - common.skip('node compiled without OpenSSL CLI.'); - -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} + +const { opensslCli } = require('../common/crypto'); -if (common.isWindows) +if (!opensslCli) { + common.skip('node compiled without OpenSSL CLI.'); +} + +if (common.isWindows) { common.skip('test does not work on Windows'); // ...but it should! 
+} const net = require('net'); const assert = require('assert'); @@ -63,11 +68,11 @@ function test(keyPath, certPath, check, next) { const key = fixtures.readSync(keyPath).toString(); const cert = fixtures.readSync(certPath).toString(); - const server = spawn(common.opensslCli, ['s_server', - '-accept', 0, - '-cert', fixtures.path(certPath), - '-key', fixtures.path(keyPath), - ...(useIPv4 ? ['-4'] : []), + const server = spawn(opensslCli, ['s_server', + '-accept', 0, + '-cert', fixtures.path(certPath), + '-key', fixtures.path(keyPath), + ...(useIPv4 ? ['-4'] : []), ]); server.stdout.pipe(process.stdout); server.stderr.pipe(process.stdout); diff --git a/test/sequential/test-tls-session-timeout.js b/test/sequential/test-tls-session-timeout.js index 09107011aeda52..a93cdc793a2337 100644 --- a/test/sequential/test-tls-session-timeout.js +++ b/test/sequential/test-tls-session-timeout.js @@ -22,8 +22,11 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} + +const { opensslCli } = require('../common/crypto'); const tmpdir = require('../common/tmpdir'); tmpdir.refresh(); @@ -56,8 +59,9 @@ const cert = fixtures.readKey('rsa_cert.crt'); } } -if (!common.opensslCli) +if (!opensslCli) { common.skip('node compiled without OpenSSL CLI.'); +} doTest(); @@ -105,7 +109,7 @@ function doTest() { '-sess_in', sessionFileName, '-sess_out', sessionFileName, ]; - const client = spawn(common.opensslCli, flags, { + const client = spawn(opensslCli, flags, { stdio: ['ignore', 'pipe', 'ignore'] }); From a62345e73b01c4ddb7eeaf642c69f15aa8bed6b0 Mon Sep 17 00:00:00 2001 From: James M Snell Date: Fri, 24 Jan 2025 17:35:59 -0800 Subject: [PATCH 124/158] test: move hasMultiLocalhost to common/net Given that `common/net` already exists and hasMultiLocalhost is net specific, let's move it out of common/index to better encapsulate and simplify common/index more PR-URL: https://github.com/nodejs/node/pull/56716 Reviewed-By: Yagiz Nizipli Reviewed-By: Richard Lau Reviewed-By: Luigi Pinca --- test/common/README.md | 6 ------ test/common/index.js | 10 ---------- test/common/index.mjs | 2 -- test/common/net.js | 10 ++++++++++ test/parallel/test-http-localaddress.js | 4 +++- test/parallel/test-http2-connect-options.js | 4 +++- test/parallel/test-https-localaddress.js | 4 +++- 7 files changed, 19 insertions(+), 21 deletions(-) diff --git a/test/common/README.md b/test/common/README.md index ee36503f920001..9ecee39b64a3df 100644 --- a/test/common/README.md +++ b/test/common/README.md @@ -238,12 +238,6 @@ Indicates if [internationalization][] is supported. Indicates whether `IPv6` is supported on this platform. -### `hasMultiLocalhost` - -* [\][] - -Indicates if there are multiple localhosts available. 
- ### `inFreeBSDJail` * [\][] diff --git a/test/common/index.js b/test/common/index.js index d2c39578324600..e8bf65d0a6edb4 100644 --- a/test/common/index.js +++ b/test/common/index.js @@ -489,15 +489,6 @@ function _mustCallInner(fn, criteria = 1, field) { return _return; } -function hasMultiLocalhost() { - const { internalBinding } = require('internal/test/binding'); - const { TCP, constants: TCPConstants } = internalBinding('tcp_wrap'); - const t = new TCP(TCPConstants.SOCKET); - const ret = t.bind('127.0.0.2', 0); - t.close(); - return ret === 0; -} - function skipIfEslintMissing() { if (!fs.existsSync( path.join(__dirname, '..', '..', 'tools', 'eslint', 'node_modules', 'eslint'), @@ -965,7 +956,6 @@ const common = { hasIntl, hasCrypto, hasQuic, - hasMultiLocalhost, invalidArgTypeHelper, isAlive, isASan, diff --git a/test/common/index.mjs b/test/common/index.mjs index 23328ac90ea3c9..090659f93be8ef 100644 --- a/test/common/index.mjs +++ b/test/common/index.mjs @@ -20,7 +20,6 @@ const { hasCrypto, hasIntl, hasIPv6, - hasMultiLocalhost, isAIX, isAlive, isDumbTerminal, @@ -75,7 +74,6 @@ export { hasCrypto, hasIntl, hasIPv6, - hasMultiLocalhost, isAIX, isAlive, isDumbTerminal, diff --git a/test/common/net.js b/test/common/net.js index 84eddd0966ed56..3886c542421005 100644 --- a/test/common/net.js +++ b/test/common/net.js @@ -17,7 +17,17 @@ function checkSupportReusePort() { }); } +function hasMultiLocalhost() { + const { internalBinding } = require('internal/test/binding'); + const { TCP, constants: TCPConstants } = internalBinding('tcp_wrap'); + const t = new TCP(TCPConstants.SOCKET); + const ret = t.bind('127.0.0.2', 0); + t.close(); + return ret === 0; +} + module.exports = { checkSupportReusePort, + hasMultiLocalhost, options, }; diff --git a/test/parallel/test-http-localaddress.js b/test/parallel/test-http-localaddress.js index a0e4bb80a3f8c2..da25ab3047613f 100644 --- a/test/parallel/test-http-localaddress.js +++ b/test/parallel/test-http-localaddress.js @@ -22,8 +22,10 @@ // Flags: --expose-internals 'use strict'; const common = require('../common'); -if (!common.hasMultiLocalhost()) +const { hasMultiLocalhost } = require('../common/net'); +if (!hasMultiLocalhost()) { common.skip('platform-specific test.'); +} const http = require('http'); const assert = require('assert'); diff --git a/test/parallel/test-http2-connect-options.js b/test/parallel/test-http2-connect-options.js index 233ced016974e2..1abcee99e06433 100644 --- a/test/parallel/test-http2-connect-options.js +++ b/test/parallel/test-http2-connect-options.js @@ -4,8 +4,10 @@ const common = require('../common'); if (!common.hasCrypto) common.skip('missing crypto'); -if (!common.hasMultiLocalhost()) +const { hasMultiLocalhost } = require('../common/net'); +if (!hasMultiLocalhost()) { common.skip('platform-specific test.'); +} const http2 = require('http2'); const assert = require('assert'); diff --git a/test/parallel/test-https-localaddress.js b/test/parallel/test-https-localaddress.js index 0de0974dc69b04..2a4629b34e4105 100644 --- a/test/parallel/test-https-localaddress.js +++ b/test/parallel/test-https-localaddress.js @@ -25,8 +25,10 @@ const common = require('../common'); if (!common.hasCrypto) common.skip('missing crypto'); -if (!common.hasMultiLocalhost()) +const { hasMultiLocalhost } = require('../common/net'); +if (!hasMultiLocalhost()) { common.skip('platform-specific test.'); +} const fixtures = require('../common/fixtures'); const assert = require('assert'); From 58d2dad67dc8f5118e1cf0367888bbc2978e6157 Mon Sep 17 
00:00:00 2001 From: Joyee Cheung Date: Sat, 25 Jan 2025 03:30:27 +0100 Subject: [PATCH 125/158] module: integrate TypeScript into compile cache This integrates TypeScript into the compile cache by caching the transpilation (either type-stripping or transforming) output in addition to the V8 code cache that's generated from the transpilation output. Locally this speeds up loading with type stripping of `benchmark/fixtures/strip-types-benchmark.ts` by ~65% and loading with type transforms of `fixtures/transform-types-benchmark.ts` by ~128%. When comparing loading .ts and loading pre-transpiled .js on-disk with the compile cache enabled, previously .ts loaded 46% slower with type-stripping and 66% slower with transforms compared to loading .js files directly. After this patch, .ts loads 12% slower with type-stripping and 22% slower with transforms compared to .js. (Note that the numbers are based on microbenchmark fixtures and do not necessarily represent real-world workloads, though with bigger real-world files, the speed up should be more significant). PR-URL: https://github.com/nodejs/node/pull/56629 Fixes: https://github.com/nodejs/node/issues/54741 Reviewed-By: Geoffrey Booth Reviewed-By: Marco Ippolito Reviewed-By: James M Snell --- lib/internal/modules/typescript.js | 63 ++++++- src/compile_cache.cc | 65 ++++++- src/compile_cache.h | 15 +- src/node_modules.cc | 96 ++++++++++ .../test-compile-cache-typescript-commonjs.js | 166 +++++++++++++++++ .../test-compile-cache-typescript-esm.js | 167 ++++++++++++++++++ ...est-compile-cache-typescript-strip-miss.js | 104 +++++++++++ ...mpile-cache-typescript-strip-sourcemaps.js | 59 +++++++ ...test-compile-cache-typescript-transform.js | 127 +++++++++++++ 9 files changed, 846 insertions(+), 16 deletions(-) create mode 100644 test/parallel/test-compile-cache-typescript-commonjs.js create mode 100644 test/parallel/test-compile-cache-typescript-esm.js create mode 100644 test/parallel/test-compile-cache-typescript-strip-miss.js create mode 100644 test/parallel/test-compile-cache-typescript-strip-sourcemaps.js create mode 100644 test/parallel/test-compile-cache-typescript-transform.js diff --git a/lib/internal/modules/typescript.js b/lib/internal/modules/typescript.js index 6abfc707657b92..17bbc6ba944432 100644 --- a/lib/internal/modules/typescript.js +++ b/lib/internal/modules/typescript.js @@ -22,6 +22,11 @@ const { const { getOptionValue } = require('internal/options'); const assert = require('internal/assert'); const { Buffer } = require('buffer'); +const { + getCompileCacheEntry, + saveCompileCacheEntry, + cachedCodeTypes: { kStrippedTypeScript, kTransformedTypeScript, kTransformedTypeScriptWithSourceMaps }, +} = internalBinding('modules'); /** * The TypeScript parsing mode, either 'strip-only' or 'transform'. @@ -105,11 +110,19 @@ function stripTypeScriptTypes(code, options = kEmptyObject) { }); } +/** + * @typedef {'strip-only' | 'transform'} TypeScriptMode + * @typedef {object} TypeScriptOptions + * @property {TypeScriptMode} mode Mode. + * @property {boolean} sourceMap Whether to generate source maps. + * @property {string|undefined} filename Filename. + */ + /** * Processes TypeScript code by stripping types or transforming. * Handles source maps if needed. * @param {string} code TypeScript code to process. - * @param {object} options The configuration object. + * @param {TypeScriptOptions} options The configuration object. * @returns {string} The processed code. 
*/ function processTypeScriptCode(code, options) { @@ -126,6 +139,20 @@ function processTypeScriptCode(code, options) { return transformedCode; } +/** + * Get the type enum used for compile cache. + * @param {TypeScriptMode} mode Mode of transpilation. + * @param {boolean} sourceMap Whether source maps are enabled. + * @returns {number} + */ +function getCachedCodeType(mode, sourceMap) { + if (mode === 'transform') { + if (sourceMap) { return kTransformedTypeScriptWithSourceMaps; } + return kTransformedTypeScript; + } + return kStrippedTypeScript; +} + /** * Performs type-stripping to TypeScript source code internally. * It is used by internal loaders. @@ -142,12 +169,40 @@ function stripTypeScriptModuleTypes(source, filename, emitWarning = true) { if (isUnderNodeModules(filename)) { throw new ERR_UNSUPPORTED_NODE_MODULES_TYPE_STRIPPING(filename); } + const sourceMap = getOptionValue('--enable-source-maps'); + + const mode = getTypeScriptParsingMode(); + + // Instead of caching the compile cache status, just go into C++ to fetch it, + // as checking process.env equally involves calling into C++ anyway, and + // the compile cache can be enabled dynamically. + const type = getCachedCodeType(mode, sourceMap); + // Get a compile cache entry into the native compile cache store, + // keyed by the filename. If the cache can already be loaded on disk, + // cached.transpiled contains the cached string. Otherwise we should do + // the transpilation and save it in the native store later using + // saveCompileCacheEntry(). + const cached = (filename ? getCompileCacheEntry(source, filename, type) : undefined); + if (cached?.transpiled) { // TODO(joyeecheung): return Buffer here. + return cached.transpiled; + } + const options = { - mode: getTypeScriptParsingMode(), - sourceMap: getOptionValue('--enable-source-maps'), + mode, + sourceMap, filename, }; - return processTypeScriptCode(source, options); + + const transpiled = processTypeScriptCode(source, options); + if (cached) { + // cached.external contains a pointer to the native cache entry. + // The cached object would be unreachable once it's out of scope, + // but the pointer inside cached.external would stay around for reuse until + // environment shutdown or when the cache is manually flushed + // to disk. Unwrap it in JS before passing into C++ since it's faster. + saveCompileCacheEntry(cached.external, transpiled); + } + return transpiled; } /** diff --git a/src/compile_cache.cc b/src/compile_cache.cc index 50697bcfe1671d..f13797e5f50288 100644 --- a/src/compile_cache.cc +++ b/src/compile_cache.cc @@ -77,10 +77,27 @@ v8::ScriptCompiler::CachedData* CompileCacheEntry::CopyCache() const { // See comments in CompileCacheHandler::Persist(). constexpr uint32_t kCacheMagicNumber = 0x8adfdbb2; +const char* CompileCacheEntry::type_name() const { + switch (type) { + case CachedCodeType::kCommonJS: + return "CommonJS"; + case CachedCodeType::kESM: + return "ESM"; + case CachedCodeType::kStrippedTypeScript: + return "StrippedTypeScript"; + case CachedCodeType::kTransformedTypeScript: + return "TransformedTypeScript"; + case CachedCodeType::kTransformedTypeScriptWithSourceMaps: + return "TransformedTypeScriptWithSourceMaps"; + default: + UNREACHABLE(); + } +} + void CompileCacheHandler::ReadCacheFile(CompileCacheEntry* entry) { Debug("[compile cache] reading cache from %s for %s %s...", entry->cache_filename, - entry->type == CachedCodeType::kCommonJS ? 
"CommonJS" : "ESM", + entry->type_name(), entry->source_filename); uv_fs_t req; @@ -256,7 +273,8 @@ void CompileCacheHandler::MaybeSaveImpl(CompileCacheEntry* entry, v8::Local func_or_mod, bool rejected) { DCHECK_NOT_NULL(entry); - Debug("[compile cache] cache for %s was %s, ", + Debug("[compile cache] V8 code cache for %s %s was %s, ", + entry->type_name(), entry->source_filename, rejected ? "rejected" : (entry->cache == nullptr) ? "not initialized" @@ -287,6 +305,25 @@ void CompileCacheHandler::MaybeSave(CompileCacheEntry* entry, MaybeSaveImpl(entry, func, rejected); } +void CompileCacheHandler::MaybeSave(CompileCacheEntry* entry, + std::string_view transpiled) { + CHECK(entry->type == CachedCodeType::kStrippedTypeScript || + entry->type == CachedCodeType::kTransformedTypeScript || + entry->type == CachedCodeType::kTransformedTypeScriptWithSourceMaps); + Debug("[compile cache] saving transpilation cache for %s %s\n", + entry->type_name(), + entry->source_filename); + + // TODO(joyeecheung): it's weird to copy it again here. Convert the v8::String + // directly into buffer held by v8::ScriptCompiler::CachedData here. + int cache_size = static_cast(transpiled.size()); + uint8_t* data = new uint8_t[cache_size]; + memcpy(data, transpiled.data(), cache_size); + entry->cache.reset(new v8::ScriptCompiler::CachedData( + data, cache_size, v8::ScriptCompiler::CachedData::BufferOwned)); + entry->refreshed = true; +} + /** * Persist the compile cache accumulated in memory to disk. * @@ -316,18 +353,25 @@ void CompileCacheHandler::Persist() { // incur a negligible overhead from thread synchronization. for (auto& pair : compiler_cache_store_) { auto* entry = pair.second.get(); + const char* type_name = entry->type_name(); if (entry->cache == nullptr) { - Debug("[compile cache] skip %s because the cache was not initialized\n", + Debug("[compile cache] skip persisting %s %s because the cache was not " + "initialized\n", + type_name, entry->source_filename); continue; } if (entry->refreshed == false) { - Debug("[compile cache] skip %s because cache was the same\n", - entry->source_filename); + Debug( + "[compile cache] skip persisting %s %s because cache was the same\n", + type_name, + entry->source_filename); continue; } if (entry->persisted == true) { - Debug("[compile cache] skip %s because cache was already persisted\n", + Debug("[compile cache] skip persisting %s %s because cache was already " + "persisted\n", + type_name, entry->source_filename); continue; } @@ -363,8 +407,9 @@ void CompileCacheHandler::Persist() { auto cleanup_mkstemp = OnScopeLeave([&mkstemp_req]() { uv_fs_req_cleanup(&mkstemp_req); }); std::string cache_filename_tmp = entry->cache_filename + ".XXXXXX"; - Debug("[compile cache] Creating temporary file for cache of %s...", - entry->source_filename); + Debug("[compile cache] Creating temporary file for cache of %s (%s)...", + entry->source_filename, + type_name); int err = uv_fs_mkstemp( nullptr, &mkstemp_req, cache_filename_tmp.c_str(), nullptr); if (err < 0) { @@ -372,8 +417,10 @@ void CompileCacheHandler::Persist() { continue; } Debug(" -> %s\n", mkstemp_req.path); - Debug("[compile cache] writing cache for %s to temporary file %s [%d %d %d " + Debug("[compile cache] writing cache for %s %s to temporary file %s [%d " + "%d %d " "%d %d]...", + type_name, entry->source_filename, mkstemp_req.path, headers[kMagicNumberOffset], diff --git a/src/compile_cache.h b/src/compile_cache.h index a7bb58c4a0be95..72910084e18bca 100644 --- a/src/compile_cache.h +++ b/src/compile_cache.h @@ 
-13,10 +13,17 @@ namespace node { class Environment; -// TODO(joyeecheung): move it into a CacheHandler class. +#define CACHED_CODE_TYPES(V) \ + V(kCommonJS, 0) \ + V(kESM, 1) \ + V(kStrippedTypeScript, 2) \ + V(kTransformedTypeScript, 3) \ + V(kTransformedTypeScriptWithSourceMaps, 4) + enum class CachedCodeType : uint8_t { - kCommonJS = 0, - kESM, +#define V(type, value) type = value, + CACHED_CODE_TYPES(V) +#undef V }; struct CompileCacheEntry { @@ -34,6 +41,7 @@ struct CompileCacheEntry { // Copy the cache into a new store for V8 to consume. Caller takes // ownership. v8::ScriptCompiler::CachedData* CopyCache() const; + const char* type_name() const; }; #define COMPILE_CACHE_STATUS(V) \ @@ -70,6 +78,7 @@ class CompileCacheHandler { void MaybeSave(CompileCacheEntry* entry, v8::Local mod, bool rejected); + void MaybeSave(CompileCacheEntry* entry, std::string_view transpiled); std::string_view cache_dir() { return compile_cache_dir_; } private: diff --git a/src/node_modules.cc b/src/node_modules.cc index 4b522a91323c9f..85c8e21cf026ff 100644 --- a/src/node_modules.cc +++ b/src/node_modules.cc @@ -1,6 +1,7 @@ #include "node_modules.h" #include #include "base_object-inl.h" +#include "compile_cache.h" #include "node_errors.h" #include "node_external_reference.h" #include "node_url.h" @@ -21,12 +22,16 @@ namespace modules { using v8::Array; using v8::Context; +using v8::External; using v8::FunctionCallbackInfo; using v8::HandleScope; +using v8::Integer; using v8::Isolate; using v8::Local; using v8::LocalVector; +using v8::Name; using v8::NewStringType; +using v8::Null; using v8::Object; using v8::ObjectTemplate; using v8::Primitive; @@ -498,6 +503,74 @@ void GetCompileCacheDir(const FunctionCallbackInfo& args) { .ToLocalChecked()); } +void GetCompileCacheEntry(const FunctionCallbackInfo& args) { + Isolate* isolate = args.GetIsolate(); + CHECK(args[0]->IsString()); // TODO(joyeecheung): accept buffer. 
+ CHECK(args[1]->IsString()); + CHECK(args[2]->IsUint32()); + Local context = isolate->GetCurrentContext(); + Environment* env = Environment::GetCurrent(context); + if (!env->use_compile_cache()) { + return; + } + Local source = args[0].As(); + Local filename = args[1].As(); + CachedCodeType type = + static_cast(args[2].As()->Value()); + auto* cache_entry = + env->compile_cache_handler()->GetOrInsert(source, filename, type); + if (cache_entry == nullptr) { + return; + } + + v8::LocalVector names(isolate, + {FIXED_ONE_BYTE_STRING(isolate, "external")}); + v8::LocalVector values(isolate, + {v8::External::New(isolate, cache_entry)}); + if (cache_entry->cache != nullptr) { + Debug(env, + DebugCategory::COMPILE_CACHE, + "[compile cache] retrieving transpile cache for %s %s...", + cache_entry->type_name(), + cache_entry->source_filename); + + std::string_view cache( + reinterpret_cast(cache_entry->cache->data), + cache_entry->cache->length); + Local transpiled; + // TODO(joyeecheung): convert with simdutf and into external strings + if (!ToV8Value(context, cache).ToLocal(&transpiled)) { + Debug(env, DebugCategory::COMPILE_CACHE, "failed\n"); + return; + } else { + Debug(env, DebugCategory::COMPILE_CACHE, "success\n"); + } + names.push_back(FIXED_ONE_BYTE_STRING(isolate, "transpiled")); + values.push_back(transpiled); + } else { + Debug(env, + DebugCategory::COMPILE_CACHE, + "[compile cache] no transpile cache for %s %s\n", + cache_entry->type_name(), + cache_entry->source_filename); + } + args.GetReturnValue().Set(Object::New( + isolate, v8::Null(isolate), names.data(), values.data(), names.size())); +} + +void SaveCompileCacheEntry(const FunctionCallbackInfo& args) { + Isolate* isolate = args.GetIsolate(); + Local context = isolate->GetCurrentContext(); + Environment* env = Environment::GetCurrent(context); + DCHECK(env->use_compile_cache()); + CHECK(args[0]->IsExternal()); + CHECK(args[1]->IsString()); // TODO(joyeecheung): accept buffer. 
+ auto* cache_entry = + static_cast(args[0].As()->Value()); + Utf8Value utf8(isolate, args[1].As()); + env->compile_cache_handler()->MaybeSave(cache_entry, utf8.ToStringView()); +} + void BindingData::CreatePerIsolateProperties(IsolateData* isolate_data, Local target) { Isolate* isolate = isolate_data->isolate(); @@ -514,6 +587,8 @@ void BindingData::CreatePerIsolateProperties(IsolateData* isolate_data, SetMethod(isolate, target, "enableCompileCache", EnableCompileCache); SetMethod(isolate, target, "getCompileCacheDir", GetCompileCacheDir); SetMethod(isolate, target, "flushCompileCache", FlushCompileCache); + SetMethod(isolate, target, "getCompileCacheEntry", GetCompileCacheEntry); + SetMethod(isolate, target, "saveCompileCacheEntry", SaveCompileCacheEntry); } void BindingData::CreatePerContextProperties(Local target, @@ -530,12 +605,31 @@ void BindingData::CreatePerContextProperties(Local target, compile_cache_status_values.push_back( \ FIXED_ONE_BYTE_STRING(isolate, #status)); COMPILE_CACHE_STATUS(V) +#undef V USE(target->Set(context, FIXED_ONE_BYTE_STRING(isolate, "compileCacheStatus"), Array::New(isolate, compile_cache_status_values.data(), compile_cache_status_values.size()))); + + LocalVector cached_code_type_keys(isolate); + LocalVector cached_code_type_values(isolate); + +#define V(type, value) \ + cached_code_type_keys.push_back(FIXED_ONE_BYTE_STRING(isolate, #type)); \ + cached_code_type_values.push_back(Integer::New(isolate, value)); \ + DCHECK_EQ(value, cached_code_type_values.size() - 1); + CACHED_CODE_TYPES(V) +#undef V + + USE(target->Set(context, + FIXED_ONE_BYTE_STRING(isolate, "cachedCodeTypes"), + Object::New(isolate, + Null(isolate), + cached_code_type_keys.data(), + cached_code_type_values.data(), + cached_code_type_keys.size()))); } void BindingData::RegisterExternalReferences( @@ -547,6 +641,8 @@ void BindingData::RegisterExternalReferences( registry->Register(EnableCompileCache); registry->Register(GetCompileCacheDir); registry->Register(FlushCompileCache); + registry->Register(GetCompileCacheEntry); + registry->Register(SaveCompileCacheEntry); } } // namespace modules diff --git a/test/parallel/test-compile-cache-typescript-commonjs.js b/test/parallel/test-compile-cache-typescript-commonjs.js new file mode 100644 index 00000000000000..b6c4581ed47be3 --- /dev/null +++ b/test/parallel/test-compile-cache-typescript-commonjs.js @@ -0,0 +1,166 @@ +'use strict'; + +// This tests NODE_COMPILE_CACHE works for CommonJS with types. + +require('../common'); +const { spawnSyncAndAssert } = require('../common/child_process'); +const assert = require('assert'); +const tmpdir = require('../common/tmpdir'); +const fixtures = require('../common/fixtures'); + +// Check cache for .ts files that would be run as CommonJS. 
+{ + tmpdir.refresh(); + const dir = tmpdir.resolve('.compile_cache_dir'); + const script = fixtures.path('typescript', 'ts', 'test-commonjs-parsing.ts'); + + spawnSyncAndAssert( + process.execPath, + [script], + { + env: { + ...process.env, + NODE_DEBUG_NATIVE: 'COMPILE_CACHE', + NODE_COMPILE_CACHE: dir + }, + cwd: tmpdir.path + }, + { + stderr(output) { + assert.match(output, /saving transpilation cache for StrippedTypeScript .*test-commonjs-parsing\.ts/); + assert.match(output, /writing cache for StrippedTypeScript .*test-commonjs-parsing\.ts.*success/); + assert.match(output, /writing cache for CommonJS .*test-commonjs-parsing\.ts.*success/); + return true; + } + }); + + spawnSyncAndAssert( + process.execPath, + [script], + { + env: { + ...process.env, + NODE_DEBUG_NATIVE: 'COMPILE_CACHE', + NODE_COMPILE_CACHE: dir + }, + cwd: tmpdir.path + }, + { + stderr(output) { + assert.match(output, /retrieving transpile cache for StrippedTypeScript .*test-commonjs-parsing\.ts.*success/); + assert.match(output, /reading cache from .* for CommonJS .*test-commonjs-parsing\.ts.*success/); + assert.match(output, /skip persisting StrippedTypeScript .*test-commonjs-parsing\.ts because cache was the same/); + assert.match(output, /V8 code cache for CommonJS .*test-commonjs-parsing\.ts was accepted, keeping the in-memory entry/); + assert.match(output, /skip persisting CommonJS .*test-commonjs-parsing\.ts because cache was the same/); + return true; + } + }); +} + +// Check cache for .cts files that require .cts files. +{ + tmpdir.refresh(); + const dir = tmpdir.resolve('.compile_cache_dir'); + const script = fixtures.path('typescript', 'cts', 'test-require-commonjs.cts'); + + spawnSyncAndAssert( + process.execPath, + [script], + { + env: { + ...process.env, + NODE_DEBUG_NATIVE: 'COMPILE_CACHE', + NODE_COMPILE_CACHE: dir + }, + cwd: tmpdir.path + }, + { + stderr(output) { + assert.match(output, /writing cache for StrippedTypeScript .*test-require-commonjs\.cts.*success/); + assert.match(output, /writing cache for StrippedTypeScript .*test-cts-export-foo\.cts.*success/); + assert.match(output, /writing cache for CommonJS .*test-require-commonjs\.cts.*success/); + assert.match(output, /writing cache for CommonJS .*test-cts-export-foo\.cts.*success/); + return true; + } + }); + + spawnSyncAndAssert( + process.execPath, + [script], + { + env: { + ...process.env, + NODE_DEBUG_NATIVE: 'COMPILE_CACHE', + NODE_COMPILE_CACHE: dir + }, + cwd: tmpdir.path + }, + { + stderr(output) { + assert.match(output, /retrieving transpile cache for StrippedTypeScript .*test-require-commonjs\.cts.*success/); + assert.match(output, /skip persisting StrippedTypeScript .*test-require-commonjs\.cts because cache was the same/); + assert.match(output, /retrieving transpile cache for StrippedTypeScript .*test-cts-export-foo\.cts.*success/); + assert.match(output, /skip persisting StrippedTypeScript .*test-cts-export-foo\.cts because cache was the same/); + + assert.match(output, /V8 code cache for CommonJS .*test-require-commonjs\.cts was accepted, keeping the in-memory entry/); + assert.match(output, /skip persisting CommonJS .*test-require-commonjs\.cts because cache was the same/); + assert.match(output, /V8 code cache for CommonJS .*test-cts-export-foo\.cts was accepted, keeping the in-memory entry/); + assert.match(output, /skip persisting CommonJS .*test-cts-export-foo\.cts because cache was the same/); + return true; + } + }); +} + +// Check cache for .cts files that require .mts files. 
+{ + tmpdir.refresh(); + const dir = tmpdir.resolve('.compile_cache_dir'); + const script = fixtures.path('typescript', 'cts', 'test-require-mts-module.cts'); + + spawnSyncAndAssert( + process.execPath, + [script], + { + env: { + ...process.env, + NODE_DEBUG_NATIVE: 'COMPILE_CACHE', + NODE_COMPILE_CACHE: dir + }, + cwd: tmpdir.path + }, + { + stderr(output) { + assert.match(output, /writing cache for StrippedTypeScript .*test-require-mts-module\.cts.*success/); + assert.match(output, /writing cache for StrippedTypeScript .*test-mts-export-foo\.mts.*success/); + assert.match(output, /writing cache for CommonJS .*test-require-mts-module\.cts.*success/); + assert.match(output, /writing cache for ESM .*test-mts-export-foo\.mts.*success/); + return true; + } + }); + + spawnSyncAndAssert( + process.execPath, + [script], + { + env: { + ...process.env, + NODE_DEBUG_NATIVE: 'COMPILE_CACHE', + NODE_COMPILE_CACHE: dir + }, + cwd: tmpdir.path + }, + { + stderr(output) { + assert.match(output, /retrieving transpile cache for StrippedTypeScript .*test-require-mts-module\.cts.*success/); + assert.match(output, /skip persisting StrippedTypeScript .*test-require-mts-module\.cts because cache was the same/); + assert.match(output, /retrieving transpile cache for StrippedTypeScript .*test-mts-export-foo\.mts.*success/); + assert.match(output, /skip persisting StrippedTypeScript .*test-mts-export-foo\.mts because cache was the same/); + + assert.match(output, /V8 code cache for CommonJS .*test-require-mts-module\.cts was accepted, keeping the in-memory entry/); + assert.match(output, /skip persisting CommonJS .*test-require-mts-module\.cts because cache was the same/); + assert.match(output, /V8 code cache for ESM .*test-mts-export-foo\.mts was accepted, keeping the in-memory entry/); + assert.match(output, /skip persisting ESM .*test-mts-export-foo\.mts because cache was the same/); + return true; + } + }); +} diff --git a/test/parallel/test-compile-cache-typescript-esm.js b/test/parallel/test-compile-cache-typescript-esm.js new file mode 100644 index 00000000000000..cec7b814da6679 --- /dev/null +++ b/test/parallel/test-compile-cache-typescript-esm.js @@ -0,0 +1,167 @@ +'use strict'; + +// This tests NODE_COMPILE_CACHE works for ESM with types. + +require('../common'); +const { spawnSyncAndAssert } = require('../common/child_process'); +const assert = require('assert'); +const tmpdir = require('../common/tmpdir'); +const fixtures = require('../common/fixtures'); + +// Check cache for .ts files that would be run as ESM. 
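+// The first run should write the StrippedTypeScript and ESM caches for the
+// entry point; the second run should reuse both without rewriting them.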
+{ + tmpdir.refresh(); + const dir = tmpdir.resolve('.compile_cache_dir'); + const script = fixtures.path('typescript', 'ts', 'test-module-typescript.ts'); + + spawnSyncAndAssert( + process.execPath, + [script], + { + env: { + ...process.env, + NODE_DEBUG_NATIVE: 'COMPILE_CACHE', + NODE_COMPILE_CACHE: dir + }, + cwd: tmpdir.path + }, + { + stderr(output) { + assert.match(output, /saving transpilation cache for StrippedTypeScript .*test-module-typescript\.ts/); + assert.match(output, /writing cache for StrippedTypeScript .*test-module-typescript\.ts.*success/); + assert.match(output, /writing cache for ESM .*test-module-typescript\.ts.*success/); + return true; + } + }); + + spawnSyncAndAssert( + process.execPath, + [script], + { + env: { + ...process.env, + NODE_DEBUG_NATIVE: 'COMPILE_CACHE', + NODE_COMPILE_CACHE: dir + }, + cwd: tmpdir.path + }, + { + stderr(output) { + assert.match(output, /retrieving transpile cache for StrippedTypeScript .*test-module-typescript\.ts.*success/); + assert.match(output, /reading cache from .* for ESM .*test-module-typescript\.ts.*success/); + assert.match(output, /skip persisting StrippedTypeScript .*test-module-typescript\.ts because cache was the same/); + assert.match(output, /V8 code cache for ESM .*test-module-typescript\.ts was accepted, keeping the in-memory entry/); + assert.match(output, /skip persisting ESM .*test-module-typescript\.ts because cache was the same/); + return true; + } + }); +} + +// Check cache for .mts files that import .mts files. +{ + tmpdir.refresh(); + const dir = tmpdir.resolve('.compile_cache_dir'); + const script = fixtures.path('typescript', 'mts', 'test-import-module.mts'); + + spawnSyncAndAssert( + process.execPath, + [script], + { + env: { + ...process.env, + NODE_DEBUG_NATIVE: 'COMPILE_CACHE', + NODE_COMPILE_CACHE: dir + }, + cwd: tmpdir.path + }, + { + stderr(output) { + assert.match(output, /writing cache for StrippedTypeScript .*test-import-module\.mts.*success/); + assert.match(output, /writing cache for StrippedTypeScript .*test-mts-export-foo\.mts.*success/); + assert.match(output, /writing cache for ESM .*test-import-module\.mts.*success/); + assert.match(output, /writing cache for ESM .*test-mts-export-foo\.mts.*success/); + return true; + } + }); + + spawnSyncAndAssert( + process.execPath, + [script], + { + env: { + ...process.env, + NODE_DEBUG_NATIVE: 'COMPILE_CACHE', + NODE_COMPILE_CACHE: dir + }, + cwd: tmpdir.path + }, + { + stderr(output) { + assert.match(output, /retrieving transpile cache for StrippedTypeScript .*test-import-module\.mts.*success/); + assert.match(output, /skip persisting StrippedTypeScript .*test-import-module\.mts because cache was the same/); + assert.match(output, /retrieving transpile cache for StrippedTypeScript .*test-mts-export-foo\.mts.*success/); + assert.match(output, /skip persisting StrippedTypeScript .*test-mts-export-foo\.mts because cache was the same/); + + assert.match(output, /V8 code cache for ESM .*test-import-module\.mts was accepted, keeping the in-memory entry/); + assert.match(output, /skip persisting ESM .*test-import-module\.mts because cache was the same/); + assert.match(output, /V8 code cache for ESM .*test-mts-export-foo\.mts was accepted, keeping the in-memory entry/); + assert.match(output, /skip persisting ESM .*test-mts-export-foo\.mts because cache was the same/); + return true; + } + }); +} + + +// Check cache for .mts files that import .cts files. 
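+// The .mts entry point is cached as ESM and the imported .cts module as
+// CommonJS; both entries should be written on the first run and reused on the second.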
+{ + tmpdir.refresh(); + const dir = tmpdir.resolve('.compile_cache_dir'); + const script = fixtures.path('typescript', 'mts', 'test-import-commonjs.mts'); + + spawnSyncAndAssert( + process.execPath, + [script], + { + env: { + ...process.env, + NODE_DEBUG_NATIVE: 'COMPILE_CACHE', + NODE_COMPILE_CACHE: dir + }, + cwd: tmpdir.path + }, + { + stderr(output) { + assert.match(output, /writing cache for StrippedTypeScript .*test-import-commonjs\.mts.*success/); + assert.match(output, /writing cache for StrippedTypeScript .*test-cts-export-foo\.cts.*success/); + assert.match(output, /writing cache for ESM .*test-import-commonjs\.mts.*success/); + assert.match(output, /writing cache for CommonJS .*test-cts-export-foo\.cts.*success/); + return true; + } + }); + + spawnSyncAndAssert( + process.execPath, + [script], + { + env: { + ...process.env, + NODE_DEBUG_NATIVE: 'COMPILE_CACHE', + NODE_COMPILE_CACHE: dir + }, + cwd: tmpdir.path + }, + { + stderr(output) { + assert.match(output, /retrieving transpile cache for StrippedTypeScript .*test-import-commonjs\.mts.*success/); + assert.match(output, /skip persisting StrippedTypeScript .*test-import-commonjs\.mts because cache was the same/); + assert.match(output, /retrieving transpile cache for StrippedTypeScript .*test-cts-export-foo\.cts.*success/); + assert.match(output, /skip persisting StrippedTypeScript .*test-cts-export-foo\.cts because cache was the same/); + + assert.match(output, /V8 code cache for ESM .*test-import-commonjs\.mts was accepted, keeping the in-memory entry/); + assert.match(output, /skip persisting ESM .*test-import-commonjs\.mts because cache was the same/); + assert.match(output, /V8 code cache for CommonJS .*test-cts-export-foo\.cts was accepted, keeping the in-memory entry/); + assert.match(output, /skip persisting CommonJS .*test-cts-export-foo\.cts because cache was the same/); + return true; + } + }); +} diff --git a/test/parallel/test-compile-cache-typescript-strip-miss.js b/test/parallel/test-compile-cache-typescript-strip-miss.js new file mode 100644 index 00000000000000..5d37a377f002e4 --- /dev/null +++ b/test/parallel/test-compile-cache-typescript-strip-miss.js @@ -0,0 +1,104 @@ +'use strict'; + +// This tests NODE_COMPILE_CACHE can handle cache invalidation +// between strip-only TypeScript and transformed TypeScript. + +require('../common'); +const { spawnSyncAndAssert } = require('../common/child_process'); +const assert = require('assert'); +const tmpdir = require('../common/tmpdir'); +const fixtures = require('../common/fixtures'); + +tmpdir.refresh(); +const dir = tmpdir.resolve('.compile_cache_dir'); +const script = fixtures.path('typescript', 'ts', 'test-typescript.ts'); + +spawnSyncAndAssert( + process.execPath, + [script], + { + env: { + ...process.env, + NODE_DEBUG_NATIVE: 'COMPILE_CACHE', + NODE_COMPILE_CACHE: dir + }, + cwd: tmpdir.path + }, + { + stderr(output) { + assert.match(output, /saving transpilation cache for StrippedTypeScript .*test-typescript\.ts/); + assert.match(output, /writing cache for StrippedTypeScript .*test-typescript\.ts.*success/); + assert.match(output, /writing cache for CommonJS .*test-typescript\.ts.*success/); + return true; + } + }); + +// Reloading with transform should miss the cache generated without transform. 
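+// Stripped and transformed outputs are cached as separate entries
+// (StrippedTypeScript vs. TransformedTypeScriptWithSourceMaps), so both the
+// transpile cache and the V8 code cache are expected to miss here.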
+spawnSyncAndAssert( + process.execPath, + ['--experimental-transform-types', script], + { + env: { + ...process.env, + NODE_DEBUG_NATIVE: 'COMPILE_CACHE', + NODE_COMPILE_CACHE: dir + }, + cwd: tmpdir.path + }, + { + stderr(output) { + // Both the transpile cache and the code cache should be missed. + assert.match(output, /no transpile cache for TransformedTypeScriptWithSourceMaps .*test-typescript\.ts/); + assert.match(output, /reading cache from .* for CommonJS .*test-typescript\.ts.*mismatch/); + // New cache with source map should be generated. + assert.match(output, /writing cache for TransformedTypeScriptWithSourceMaps .*test-typescript\.ts.*success/); + assert.match(output, /writing cache for CommonJS .*test-typescript\.ts.*success/); + return true; + } + }); + +// Reloading with transform should hit the cache generated with transform. +spawnSyncAndAssert( + process.execPath, + ['--experimental-transform-types', script], + { + env: { + ...process.env, + NODE_DEBUG_NATIVE: 'COMPILE_CACHE', + NODE_COMPILE_CACHE: dir + }, + cwd: tmpdir.path + }, + { + stderr(output) { + assert.match(output, /retrieving transpile cache for TransformedTypeScriptWithSourceMaps .*test-typescript\.ts.*success/); + assert.match(output, /reading cache from .* for CommonJS .*test-typescript\.ts.*success/); + assert.match(output, /skip persisting TransformedTypeScriptWithSourceMaps .*test-typescript\.ts because cache was the same/); + assert.match(output, /V8 code cache for CommonJS .*test-typescript\.ts was accepted, keeping the in-memory entry/); + assert.match(output, /skip persisting CommonJS .*test-typescript\.ts because cache was the same/); + return true; + } + }); + +// Reloading without transform should hit the co-existing transpile cache generated without transform, +// but miss the code cache generated with transform. +spawnSyncAndAssert( + process.execPath, + [script], + { + env: { + ...process.env, + NODE_DEBUG_NATIVE: 'COMPILE_CACHE', + NODE_COMPILE_CACHE: dir + }, + cwd: tmpdir.path + }, + { + stderr(output) { + assert.match(output, /retrieving transpile cache for StrippedTypeScript .*test-typescript\.ts.*success/); + assert.match(output, /reading cache from .* for CommonJS .*test-typescript\.ts.*mismatch/); + assert.match(output, /skip persisting StrippedTypeScript .*test-typescript\.ts because cache was the same/); + assert.match(output, /writing cache for CommonJS .*test-typescript\.ts.*success/); + return true; + } + }); diff --git a/test/parallel/test-compile-cache-typescript-strip-sourcemaps.js b/test/parallel/test-compile-cache-typescript-strip-sourcemaps.js new file mode 100644 index 00000000000000..da5e350496f005 --- /dev/null +++ b/test/parallel/test-compile-cache-typescript-strip-sourcemaps.js @@ -0,0 +1,59 @@ +'use strict'; + +// This tests NODE_COMPILE_CACHE can be used for type stripping and ignores +// --enable-source-maps as there's no difference in the code generated. 
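+// Type stripping only appends a sourceURL regardless of --enable-source-maps,
+// so the cache written without the flag below should be reusable once the flag is enabled.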
+ +require('../common'); +const { spawnSyncAndAssert } = require('../common/child_process'); +const assert = require('assert'); +const tmpdir = require('../common/tmpdir'); +const fixtures = require('../common/fixtures'); + +tmpdir.refresh(); +const dir = tmpdir.resolve('.compile_cache_dir'); +const script = fixtures.path('typescript', 'ts', 'test-typescript.ts'); + +spawnSyncAndAssert( + process.execPath, + [script], + { + env: { + ...process.env, + NODE_DEBUG_NATIVE: 'COMPILE_CACHE', + NODE_COMPILE_CACHE: dir + }, + cwd: tmpdir.path + }, + { + stderr(output) { + assert.match(output, /saving transpilation cache for StrippedTypeScript .*test-typescript\.ts/); + assert.match(output, /writing cache for StrippedTypeScript .*test-typescript\.ts.*success/); + assert.match(output, /writing cache for CommonJS .*test-typescript\.ts.*success/); + return true; + } + }); + +// Reloading with source maps should hit the cache generated without source maps, because for +// type stripping, only sourceURL is added regardless of whether source map is enabled. +spawnSyncAndAssert( + process.execPath, + ['--enable-source-maps', script], + { + env: { + ...process.env, + NODE_DEBUG_NATIVE: 'COMPILE_CACHE', + NODE_COMPILE_CACHE: dir + }, + cwd: tmpdir.path + }, + { + stderr(output) { + // Both the transpile cache and the code cache should be missed. + assert.match(output, /retrieving transpile cache for StrippedTypeScript .*test-typescript\.ts.*success/); + assert.match(output, /reading cache from .* for CommonJS .*test-typescript\.ts.*success/); + assert.match(output, /skip persisting StrippedTypeScript .*test-typescript\.ts because cache was the same/); + assert.match(output, /V8 code cache for CommonJS .*test-typescript\.ts was accepted, keeping the in-memory entry/); + assert.match(output, /skip persisting CommonJS .*test-typescript\.ts because cache was the same/); + return true; + } + }); diff --git a/test/parallel/test-compile-cache-typescript-transform.js b/test/parallel/test-compile-cache-typescript-transform.js new file mode 100644 index 00000000000000..41eb67b203baa1 --- /dev/null +++ b/test/parallel/test-compile-cache-typescript-transform.js @@ -0,0 +1,127 @@ +'use strict'; + +// This tests NODE_COMPILE_CACHE works with --experimental-transform-types. + +require('../common'); +const { spawnSyncAndAssert } = require('../common/child_process'); +const assert = require('assert'); +const tmpdir = require('../common/tmpdir'); +const fixtures = require('../common/fixtures'); + + +tmpdir.refresh(); +const dir = tmpdir.resolve('.compile_cache_dir'); +const script = fixtures.path('typescript', 'ts', 'transformation', 'test-enum.ts'); + +// Check --experimental-transform-types which enables source maps by default. 
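+// The first run below should write the TransformedTypeScriptWithSourceMaps
+// transpile cache and the CommonJS code cache; the identical second run should hit both.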
+spawnSyncAndAssert( + process.execPath, + ['--experimental-transform-types', script], + { + env: { + ...process.env, + NODE_DEBUG_NATIVE: 'COMPILE_CACHE', + NODE_COMPILE_CACHE: dir + }, + cwd: tmpdir.path + }, + { + stderr(output) { + assert.match(output, /saving transpilation cache for TransformedTypeScriptWithSourceMaps .*test-enum\.ts/); + assert.match(output, /writing cache for TransformedTypeScriptWithSourceMaps .*test-enum\.ts.*success/); + assert.match(output, /writing cache for CommonJS .*test-enum\.ts.*success/); + return true; + } + }); + +spawnSyncAndAssert( + process.execPath, + ['--experimental-transform-types', script], + { + env: { + ...process.env, + NODE_DEBUG_NATIVE: 'COMPILE_CACHE', + NODE_COMPILE_CACHE: dir + }, + cwd: tmpdir.path + }, + { + stderr(output) { + assert.match(output, /retrieving transpile cache for TransformedTypeScriptWithSourceMaps .*test-enum\.ts.*success/); + assert.match(output, /reading cache from .* for CommonJS .*test-enum\.ts.*success/); + assert.match(output, /skip persisting TransformedTypeScriptWithSourceMaps .*test-enum\.ts because cache was the same/); + assert.match(output, /V8 code cache for CommonJS .*test-enum\.ts was accepted, keeping the in-memory entry/); + assert.match(output, /skip persisting CommonJS .*test-enum\.ts because cache was the same/); + return true; + } + }); + +// Reloading without source maps should miss the cache generated with source maps. +spawnSyncAndAssert( + process.execPath, + ['--experimental-transform-types', '--no-enable-source-maps', script], + { + env: { + ...process.env, + NODE_DEBUG_NATIVE: 'COMPILE_CACHE', + NODE_COMPILE_CACHE: dir + }, + cwd: tmpdir.path + }, + { + stderr(output) { + // Both the transpile cache and the code cache should be missed. + assert.match(output, /no transpile cache for TransformedTypeScript .*test-enum\.ts/); + assert.match(output, /reading cache from .* for CommonJS .*test-enum\.ts.*mismatch/); + // New cache without source map should be generated. + assert.match(output, /writing cache for TransformedTypeScript .*test-enum\.ts.*success/); + assert.match(output, /writing cache for CommonJS .*test-enum\.ts.*success/); + return true; + } + }); + +// Reloading without source maps again should hit the cache generated without source maps. +spawnSyncAndAssert( + process.execPath, + ['--experimental-transform-types', '--no-enable-source-maps', script], + { + env: { + ...process.env, + NODE_DEBUG_NATIVE: 'COMPILE_CACHE', + NODE_COMPILE_CACHE: dir + }, + cwd: tmpdir.path + }, + { + stderr(output) { + assert.match(output, /retrieving transpile cache for TransformedTypeScript .*test-enum\.ts.*success/); + assert.match(output, /reading cache from .* for CommonJS .*test-enum\.ts.*success/); + assert.match(output, /skip persisting TransformedTypeScript .*test-enum\.ts because cache was the same/); + assert.match(output, /V8 code cache for CommonJS .*test-enum\.ts was accepted, keeping the in-memory entry/); + assert.match(output, /skip persisting CommonJS .*test-enum\.ts because cache was the same/); + return true; + } + }); + +// Reloading with source maps again should hit the co-existing transpile cache with source +// maps, but miss the code cache generated without source maps. 
+spawnSyncAndAssert( + process.execPath, + ['--experimental-transform-types', script], + { + env: { + ...process.env, + NODE_DEBUG_NATIVE: 'COMPILE_CACHE', + NODE_COMPILE_CACHE: dir + }, + cwd: tmpdir.path + }, + { + stderr(output) { + assert.match(output, /retrieving transpile cache for TransformedTypeScriptWithSourceMaps .*test-enum\.ts.*success/); + assert.match(output, /reading cache from .* for CommonJS .*test-enum\.ts.*mismatch/); + assert.match(output, /skip persisting TransformedTypeScriptWithSourceMaps .*test-enum\.ts because cache was the same/); + assert.match(output, /writing cache for CommonJS .*test-enum\.ts.*success/); + return true; + } + }); From e654c8b84ad230c1504c5541039276d005b72ef9 Mon Sep 17 00:00:00 2001 From: James M Snell Date: Wed, 22 Jan 2025 13:05:54 -0800 Subject: [PATCH 126/158] test: simplify common/index.js Move single or trivial and limited use things out of common/index.js for the purpose of simplifying and reducing common/index.js PR-URL: https://github.com/nodejs/node/pull/56712 Reviewed-By: Yagiz Nizipli Reviewed-By: Matteo Collina --- test/common/README.md | 6 ------ test/common/index.js | 12 ------------ test/common/index.mjs | 4 ---- test/parallel/test-source-map-enable.js | 3 ++- test/tick-processor/util.js | 11 +++++++++-- test/wasi/test-wasi-io.js | 11 ++++++++--- 6 files changed, 19 insertions(+), 28 deletions(-) diff --git a/test/common/README.md b/test/common/README.md index 9ecee39b64a3df..c3c44e32b3788c 100644 --- a/test/common/README.md +++ b/test/common/README.md @@ -279,12 +279,6 @@ Platform check for IBMi. Platform check for Linux. -### `isLinuxPPCBE` - -* [\][] - -Platform check for Linux on PowerPC. - ### `isMacOS` * [\][] diff --git a/test/common/index.js b/test/common/index.js index e8bf65d0a6edb4..238d66e96fe257 100644 --- a/test/common/index.js +++ b/test/common/index.js @@ -1034,11 +1034,6 @@ const common = { return require('os').type() === 'OS400'; }, - get isLinuxPPCBE() { - return (process.platform === 'linux') && (process.arch === 'ppc64') && - (require('os').endianness() === 'BE'); - }, - get localhostIPv4() { if (localhostIPv4 !== null) return localhostIPv4; @@ -1067,13 +1062,6 @@ const common = { return +process.env.NODE_COMMON_PORT || 12346; }, - /** - * Returns the EOL character used by this Git checkout. - */ - get checkoutEOL() { - return fs.readFileSync(__filename).includes('\r\n') ? 
'\r\n' : '\n'; - }, - get isInsideDirWithUnusualChars() { return __dirname.includes('%') || (!isWindows && __dirname.includes('\\')) || diff --git a/test/common/index.mjs b/test/common/index.mjs index 090659f93be8ef..aafef1453bd78a 100644 --- a/test/common/index.mjs +++ b/test/common/index.mjs @@ -7,7 +7,6 @@ const { allowGlobals, buildType, canCreateSymLink, - checkoutEOL, childShouldThrowAndAbort, createZeroFilledFile, enoughTestMem, @@ -27,7 +26,6 @@ const { isIBMi, isInsideDirWithUnusualChars, isLinux, - isLinuxPPCBE, isMainThread, isOpenBSD, isMacOS, @@ -59,7 +57,6 @@ export { allowGlobals, buildType, canCreateSymLink, - checkoutEOL, childShouldThrowAndAbort, createRequire, createZeroFilledFile, @@ -81,7 +78,6 @@ export { isIBMi, isInsideDirWithUnusualChars, isLinux, - isLinuxPPCBE, isMainThread, isOpenBSD, isMacOS, diff --git a/test/parallel/test-source-map-enable.js b/test/parallel/test-source-map-enable.js index 46c25d26cfa8e7..64f4254fcddbc6 100644 --- a/test/parallel/test-source-map-enable.js +++ b/test/parallel/test-source-map-enable.js @@ -242,6 +242,7 @@ function nextdir() { // Persists line lengths for in-memory representation of source file. { + const checkoutEOL = fs.readFileSync(__filename).includes('\r\n') ? '\r\n' : '\n'; const coverageDirectory = nextdir(); spawnSync(process.execPath, [ require.resolve('../fixtures/source-map/istanbul-throw.js'), @@ -250,7 +251,7 @@ function nextdir() { 'istanbul-throw.js', coverageDirectory ); - if (common.checkoutEOL === '\r\n') { + if (checkoutEOL === '\r\n') { assert.deepStrictEqual(sourceMap.lineLengths, [1086, 31, 185, 649, 0]); } else { assert.deepStrictEqual(sourceMap.lineLengths, [1085, 30, 184, 648, 0]); diff --git a/test/tick-processor/util.js b/test/tick-processor/util.js index 6d118b7c38bc66..9586a81c276a65 100644 --- a/test/tick-processor/util.js +++ b/test/tick-processor/util.js @@ -5,14 +5,21 @@ const { isWindows, isSunOS, isAIX, - isLinuxPPCBE, isFreeBSD, } = require('../common'); +const { endianness } = require('os'); + +function isLinuxPPCBE() { + return (process.platform === 'linux') && + (process.arch === 'ppc64') && + (endianness() === 'BE'); +} + module.exports = { isCPPSymbolsNotMapped: isWindows || isSunOS || isAIX || - isLinuxPPCBE || + isLinuxPPCBE() || isFreeBSD, }; diff --git a/test/wasi/test-wasi-io.js b/test/wasi/test-wasi-io.js index 061ac88a73ece4..f5348644f1cfbf 100644 --- a/test/wasi/test-wasi-io.js +++ b/test/wasi/test-wasi-io.js @@ -1,14 +1,19 @@ 'use strict'; -const common = require('../common'); -const { checkoutEOL } = common; +require('../common'); +const { readFileSync } = require('fs'); const { testWasiPreview1 } = require('../common/wasi'); +const checkoutEOL = readFileSync(__filename).includes('\r\n') ? '\r\n' : '\n'; + +// TODO(@jasnell): It's not entirely clear what this test is asserting. +// More comments would be helpful. + testWasiPreview1(['freopen'], {}, { stdout: `hello from input2.txt${checkoutEOL}` }); testWasiPreview1(['read_file'], {}, { stdout: `hello from input.txt${checkoutEOL}` }); testWasiPreview1(['read_file_twice'], {}, { stdout: `hello from input.txt${checkoutEOL}hello from input.txt${checkoutEOL}`, }); // Tests that are currently unsupported on Windows. 
-if (!common.isWindows) { +if (process.platform !== 'win32') { testWasiPreview1(['stdin'], { input: 'hello world' }, { stdout: 'hello world' }); } From ef2ed713899b4c24d3f20bb7b52f5d094715a6d9 Mon Sep 17 00:00:00 2001 From: James M Snell Date: Wed, 22 Jan 2025 14:19:38 -0800 Subject: [PATCH 127/158] test: rely less on duplicative common test harness utilities There are several cleanups here that are not just style nits... 1. The `common.isMainThread` was just a passthrough to the `isMainThread` export on the worker_thread module. It's use was inconsistent and just obfuscated the fact that the test file depend on the `worker_threads` built-in. By eliminating it we simplify the test harness a bit and make it clearer which tests depend on the worker_threads check. 2. The `common.isDumbTerminal` is fairly unnecesary since that just wraps a public API check. 3. Several of the `common.skipIf....` checks were inconsistently used and really don't need to be separate utility functions. A key part of the motivation here is to work towards making more of the tests more self-contained and less reliant on the common test harness where possible. PR-URL: https://github.com/nodejs/node/pull/56712 Reviewed-By: Yagiz Nizipli Reviewed-By: Matteo Collina --- test/abort/test-abort-backtrace.js | 45 ++++++++++++- test/async-hooks/init-hooks.js | 5 +- test/async-hooks/test-crypto-pbkdf2.js | 7 +- test/async-hooks/test-crypto-randomBytes.js | 7 +- test/async-hooks/test-enable-disable.js | 3 +- test/async-hooks/test-fseventwrap.js | 7 +- .../test-fsreqcallback-readFile.js | 4 +- test/async-hooks/test-getaddrinforeqwrap.js | 4 +- test/async-hooks/test-getnameinforeqwrap.js | 4 +- test/async-hooks/test-graph.signal.js | 7 +- .../test-no-assert-when-disabled.js | 5 +- test/async-hooks/test-pipewrap.js | 4 +- ...promise.chain-promise-before-init-hooks.js | 4 +- test/async-hooks/test-promise.js | 4 +- test/async-hooks/test-signalwrap.js | 7 +- test/async-hooks/test-statwatcher.js | 5 +- .../test-unhandled-rejection-context.js | 4 +- test/benchmark/test-benchmark-napi.js | 4 +- test/common/README.md | 17 ----- test/common/index.js | 66 ------------------- test/common/index.mjs | 8 --- test/es-module/test-esm-resolve-type.mjs | 4 +- .../test-vm-main-context-default-loader.js | 6 +- test/fixtures/permission/fs-write.js | 8 ++- test/fixtures/permission/processbinding.js | 6 +- test/internet/test-trace-events-dns.js | 4 +- ...test-async-hooks-disable-during-promise.js | 4 +- .../test-async-hooks-promise-triggerid.js | 4 +- test/parallel/test-async-hooks-promise.js | 4 +- ...st-async-hooks-top-level-clearimmediate.js | 4 +- .../test-async-wrap-promise-after-enabled.js | 4 +- test/parallel/test-bootstrap-modules.js | 10 +-- .../test-child-process-validate-stdio.js | 4 +- .../test-cluster-net-listen-relative-path.js | 9 ++- test/parallel/test-code-cache.js | 3 +- test/parallel/test-console-clear.js | 4 +- test/parallel/test-console.js | 4 +- test/parallel/test-crypto-no-algorithm.js | 3 +- test/parallel/test-cwd-enoent-preload.js | 9 ++- test/parallel/test-cwd-enoent-repl.js | 9 ++- test/parallel/test-cwd-enoent.js | 9 ++- test/parallel/test-fs-mkdir.js | 3 +- test/parallel/test-fs-realpath.js | 4 +- test/parallel/test-fs-whatwg-url.js | 4 +- test/parallel/test-fs-write-file-sync.js | 4 +- test/parallel/test-http-chunk-problem.js | 11 +++- test/parallel/test-icu-env.js | 4 +- .../test-inspector-already-activated-cli.js | 7 +- .../test-inspector-async-hook-after-done.js | 7 +- 
...st-inspector-async-hook-setup-at-signal.js | 6 +- .../test-inspector-connect-main-thread.js | 4 +- .../test-inspector-connect-to-main-thread.js | 4 +- test/parallel/test-inspector-contexts.js | 4 +- ...ctor-exit-worker-in-wait-for-connection.js | 12 +++- ...tor-exit-worker-in-wait-for-connection2.js | 6 +- test/parallel/test-inspector-open-coverage.js | 7 +- ...st-inspector-open-port-integer-overflow.js | 7 +- .../test-inspector-overwrite-config.js | 4 +- .../test-inspector-port-zero-cluster.js | 7 +- .../parallel/test-inspector-tracing-domain.js | 8 ++- .../test-inspector-workers-flat-list.js | 4 +- test/parallel/test-internal-module-require.js | 3 +- ...st-performance-nodetiming-uvmetricsinfo.js | 6 +- .../test-permission-allow-addons-cli.js | 6 +- ...test-permission-allow-child-process-cli.js | 8 ++- .../test-permission-allow-wasi-cli.js | 6 +- .../test-permission-child-process-cli.js | 7 +- .../test-permission-fs-absolute-path.js | 6 +- ...test-permission-fs-internal-module-stat.js | 6 +- test/parallel/test-permission-fs-read.js | 6 +- .../test-permission-fs-relative-path.js | 6 +- .../test-permission-fs-repeat-path.js | 6 +- test/parallel/test-permission-fs-require.js | 8 ++- .../test-permission-fs-symlink-relative.js | 7 +- ...test-permission-fs-symlink-target-write.js | 18 +++-- test/parallel/test-permission-fs-symlink.js | 12 +++- .../test-permission-fs-traversal-path.js | 13 +++- test/parallel/test-permission-fs-wildcard.js | 6 +- .../test-permission-fs-windows-path.js | 6 +- .../test-permission-fs-write-report.js | 10 ++- test/parallel/test-permission-fs-write-v8.js | 10 ++- test/parallel/test-permission-fs-write.js | 10 ++- .../parallel/test-permission-inspector-brk.js | 6 +- test/parallel/test-permission-inspector.js | 7 +- test/parallel/test-permission-no-addons.js | 6 +- .../test-permission-processbinding.js | 6 +- .../test-permission-worker-threads-cli.js | 8 ++- test/parallel/test-pipe-file-to-http.js | 7 +- .../parallel/test-preload-self-referential.js | 4 +- test/parallel/test-process-abort.js | 4 +- .../test-process-beforeexit-throw-exit.js | 6 +- .../test-process-chdir-errormessage.js | 5 +- test/parallel/test-process-chdir.js | 4 +- test/parallel/test-process-env-tz.js | 7 +- test/parallel/test-process-euid-egid.js | 5 +- test/parallel/test-process-exit-handler.js | 4 +- test/parallel/test-process-get-builtin.mjs | 3 +- test/parallel/test-process-initgroups.js | 5 +- test/parallel/test-process-load-env-file.js | 5 +- test/parallel/test-process-setgroups.js | 4 +- test/parallel/test-process-uid-gid.js | 4 +- test/parallel/test-process-umask-mask.js | 3 +- test/parallel/test-process-umask.js | 3 +- ...-readline-interface-no-trailing-newline.js | 4 +- ...est-readline-interface-recursive-writes.js | 4 +- test/parallel/test-readline-interface.js | 5 +- test/parallel/test-readline-position.js | 4 +- .../test-readline-promises-interface.js | 5 +- .../test-readline-promises-tab-complete.js | 4 +- test/parallel/test-readline-tab-complete.js | 4 +- .../test-readline-undefined-columns.js | 4 +- test/parallel/test-readline.js | 4 +- test/parallel/test-repl-autocomplete.js | 4 +- test/parallel/test-repl-editor.js | 4 +- test/parallel/test-repl-history-navigation.js | 4 +- ...repl-load-multiline-no-trailing-newline.js | 4 +- test/parallel/test-repl-load-multiline.js | 4 +- test/parallel/test-repl-mode.js | 4 +- test/parallel/test-repl-permission-model.js | 4 +- test/parallel/test-repl-persistent-history.js | 4 +- .../test-repl-programmatic-history.js | 4 +- 
.../test-repl-require-self-referential.js | 4 +- test/parallel/test-repl-require.js | 4 +- test/parallel/test-repl-reverse-search.js | 5 +- test/parallel/test-repl-sigint-nested-eval.js | 6 +- test/parallel/test-repl-sigint.js | 6 +- .../test-repl-strict-mode-previews.js | 5 +- .../parallel/test-repl-tab-complete-import.js | 5 +- test/parallel/test-repl-tab-complete.js | 4 +- test/parallel/test-require-symlink.js | 8 ++- test/parallel/test-runner-module-mocking.js | 3 +- test/parallel/test-set-process-debug-port.js | 6 +- test/parallel/test-setproctitle.js | 14 ++-- .../test-shadow-realm-import-value-resolve.js | 5 +- test/parallel/test-signal-args.js | 9 ++- test/parallel/test-signal-handler.js | 9 ++- test/parallel/test-stdio-pipe-access.js | 5 +- test/parallel/test-stdio-pipe-redirect.js | 5 +- .../test-timers-immediate-unref-simple.js | 3 +- test/parallel/test-trace-events-api.js | 7 +- .../test-trace-events-dynamic-enable.js | 8 ++- test/parallel/test-warn-sigprof.js | 9 ++- test/parallel/test-worker-name.js | 11 +++- test/report/test-report-signal.js | 8 ++- test/sequential/test-fs-watch.js | 4 +- test/sequential/test-heapdump.js | 4 +- test/sequential/test-init.js | 4 +- test/sequential/test-perf-hooks.js | 5 +- 148 files changed, 672 insertions(+), 290 deletions(-) diff --git a/test/abort/test-abort-backtrace.js b/test/abort/test-abort-backtrace.js index ce9ed39196eb1f..455bbf2361cf51 100644 --- a/test/abort/test-abort-backtrace.js +++ b/test/abort/test-abort-backtrace.js @@ -1,8 +1,47 @@ 'use strict'; -const common = require('../common'); +require('../common'); const assert = require('assert'); const cp = require('child_process'); +function getPrintedStackTrace(stderr) { + const lines = stderr.split('\n'); + + let state = 'initial'; + const result = { + message: [], + nativeStack: [], + jsStack: [], + }; + for (let i = 0; i < lines.length; ++i) { + const line = lines[i].trim(); + if (line.length === 0) { + continue; // Skip empty lines. + } + + switch (state) { + case 'initial': + result.message.push(line); + if (line.includes('Native stack trace')) { + state = 'native-stack'; + } else { + result.message.push(line); + } + break; + case 'native-stack': + if (line.includes('JavaScript stack trace')) { + state = 'js-stack'; + } else { + result.nativeStack.push(line); + } + break; + case 'js-stack': + result.jsStack.push(line); + break; + } + } + return result; +} + if (process.argv[2] === 'child') { process.abort(); } else { @@ -10,7 +49,7 @@ if (process.argv[2] === 'child') { const stderr = child.stderr.toString(); assert.strictEqual(child.stdout.toString(), ''); - const { nativeStack, jsStack } = common.getPrintedStackTrace(stderr); + const { nativeStack, jsStack } = getPrintedStackTrace(stderr); if (!nativeStack.every((frame, index) => frame.startsWith(`${index + 1}:`))) { assert.fail(`Each frame should start with a frame number:\n${stderr}`); @@ -18,7 +57,7 @@ if (process.argv[2] === 'child') { // For systems that don't support backtraces, the native stack is // going to be empty. 
- if (!common.isWindows && nativeStack.length > 0) { + if (process.platform !== 'win32' && nativeStack.length > 0) { const { getBinaryPath } = require('../common/shared-lib-util'); if (!nativeStack.some((frame) => frame.includes(`[${getBinaryPath()}]`))) { assert.fail(`Some native stack frame include the binary name:\n${stderr}`); diff --git a/test/async-hooks/init-hooks.js b/test/async-hooks/init-hooks.js index 2206ab31eba75f..8fc44994fbc497 100644 --- a/test/async-hooks/init-hooks.js +++ b/test/async-hooks/init-hooks.js @@ -1,9 +1,10 @@ 'use strict'; // Flags: --expose-gc -const common = require('../common'); +require('../common'); const assert = require('assert'); const async_hooks = require('async_hooks'); +const { isMainThread } = require('worker_threads'); const util = require('util'); const print = process._rawDebug; @@ -161,7 +162,7 @@ class ActivityCollector { const stub = { uid, type: 'Unknown', handleIsObject: true, handle: {} }; this._activities.set(uid, stub); return stub; - } else if (!common.isMainThread) { + } else if (!isMainThread) { // Worker threads start main script execution inside of an AsyncWrap // callback, so we don't yield errors for these. return null; diff --git a/test/async-hooks/test-crypto-pbkdf2.js b/test/async-hooks/test-crypto-pbkdf2.js index 4788ce4a580656..c607adf7258760 100644 --- a/test/async-hooks/test-crypto-pbkdf2.js +++ b/test/async-hooks/test-crypto-pbkdf2.js @@ -1,10 +1,13 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); -if (!common.isMainThread) +} +const { isMainThread } = require('worker_threads'); +if (!isMainThread) { common.skip('Worker bootstrapping works differently -> different async IDs'); +} const assert = require('assert'); const tick = require('../common/tick'); diff --git a/test/async-hooks/test-crypto-randomBytes.js b/test/async-hooks/test-crypto-randomBytes.js index 88cd4643ab6638..8ecc1c45a9a524 100644 --- a/test/async-hooks/test-crypto-randomBytes.js +++ b/test/async-hooks/test-crypto-randomBytes.js @@ -1,10 +1,13 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); -if (!common.isMainThread) +} +const { isMainThread } = require('worker_threads'); +if (!isMainThread) { common.skip('Worker bootstrapping works differently -> different async IDs'); +} const assert = require('assert'); const tick = require('../common/tick'); diff --git a/test/async-hooks/test-enable-disable.js b/test/async-hooks/test-enable-disable.js index 64139408a48209..d408338e892c32 100644 --- a/test/async-hooks/test-enable-disable.js +++ b/test/async-hooks/test-enable-disable.js @@ -87,8 +87,9 @@ const assert = require('assert'); const tick = require('../common/tick'); const initHooks = require('./init-hooks'); const { checkInvocations } = require('./hook-checks'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) common.skip('Worker bootstrapping works differently -> different timing'); // Include "Unknown"s because hook2 will not be able to identify diff --git a/test/async-hooks/test-fseventwrap.js b/test/async-hooks/test-fseventwrap.js index 12a439f8033cbc..a5e1a3b9d2f232 100644 --- a/test/async-hooks/test-fseventwrap.js +++ b/test/async-hooks/test-fseventwrap.js @@ -6,12 +6,15 @@ const initHooks = require('./init-hooks'); const tick = require('../common/tick'); const { checkInvocations } = require('./hook-checks'); const fs = require('fs'); 
+const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) { common.skip('Worker bootstrapping works differently -> different async IDs'); +} -if (common.isIBMi) +if (common.isIBMi) { common.skip('IBMi does not support fs.watch()'); +} const hooks = initHooks(); diff --git a/test/async-hooks/test-fsreqcallback-readFile.js b/test/async-hooks/test-fsreqcallback-readFile.js index 01ccce9b4cc694..65f3652f12f988 100644 --- a/test/async-hooks/test-fsreqcallback-readFile.js +++ b/test/async-hooks/test-fsreqcallback-readFile.js @@ -6,9 +6,11 @@ const tick = require('../common/tick'); const initHooks = require('./init-hooks'); const { checkInvocations } = require('./hook-checks'); const fs = require('fs'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) { common.skip('Worker bootstrapping works differently -> different async IDs'); +} const hooks = initHooks(); diff --git a/test/async-hooks/test-getaddrinforeqwrap.js b/test/async-hooks/test-getaddrinforeqwrap.js index 7291ea8a301954..a21557bcd56e7a 100644 --- a/test/async-hooks/test-getaddrinforeqwrap.js +++ b/test/async-hooks/test-getaddrinforeqwrap.js @@ -6,9 +6,11 @@ const tick = require('../common/tick'); const initHooks = require('./init-hooks'); const { checkInvocations } = require('./hook-checks'); const dns = require('dns'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) { common.skip('Worker bootstrapping works differently -> different async IDs'); +} const hooks = initHooks(); diff --git a/test/async-hooks/test-getnameinforeqwrap.js b/test/async-hooks/test-getnameinforeqwrap.js index c7a3937ff3ceef..b00fa0d4d9dd54 100644 --- a/test/async-hooks/test-getnameinforeqwrap.js +++ b/test/async-hooks/test-getnameinforeqwrap.js @@ -6,9 +6,11 @@ const tick = require('../common/tick'); const initHooks = require('./init-hooks'); const { checkInvocations } = require('./hook-checks'); const dns = require('dns'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) { common.skip('Worker bootstrapping works differently -> different async IDs'); +} const hooks = initHooks(); diff --git a/test/async-hooks/test-graph.signal.js b/test/async-hooks/test-graph.signal.js index f87b1215b523b9..351fb7550af431 100644 --- a/test/async-hooks/test-graph.signal.js +++ b/test/async-hooks/test-graph.signal.js @@ -1,10 +1,13 @@ 'use strict'; const common = require('../common'); -if (common.isWindows) +if (common.isWindows) { common.skip('no signals on Windows'); -if (!common.isMainThread) +} +const { isMainThread } = require('worker_threads'); +if (!isMainThread) { common.skip('No signal handling available in Workers'); +} const initHooks = require('./init-hooks'); const verifyGraph = require('./verify-graph'); diff --git a/test/async-hooks/test-no-assert-when-disabled.js b/test/async-hooks/test-no-assert-when-disabled.js index 70114d1e1140f8..0e7c0568cc09fa 100644 --- a/test/async-hooks/test-no-assert-when-disabled.js +++ b/test/async-hooks/test-no-assert-when-disabled.js @@ -1,9 +1,10 @@ 'use strict'; // Flags: --no-force-async-hooks-checks --expose-internals const common = require('../common'); - -if (!common.isMainThread) +const { isMainThread } = require('worker_threads'); +if (!isMainThread) { common.skip('Workers don\'t inherit per-env state like the check flag'); +} const async_hooks = require('internal/async_hooks'); diff --git a/test/async-hooks/test-pipewrap.js 
b/test/async-hooks/test-pipewrap.js index 2d42e769cfd1f3..7ea5f38adc85e2 100644 --- a/test/async-hooks/test-pipewrap.js +++ b/test/async-hooks/test-pipewrap.js @@ -9,9 +9,11 @@ const tick = require('../common/tick'); const initHooks = require('./init-hooks'); const { checkInvocations } = require('./hook-checks'); const { spawn } = require('child_process'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) { common.skip('Worker bootstrapping works differently -> different async IDs'); +} const hooks = initHooks(); diff --git a/test/async-hooks/test-promise.chain-promise-before-init-hooks.js b/test/async-hooks/test-promise.chain-promise-before-init-hooks.js index 52a312dbdfe196..c5e67b6f94ca68 100644 --- a/test/async-hooks/test-promise.chain-promise-before-init-hooks.js +++ b/test/async-hooks/test-promise.chain-promise-before-init-hooks.js @@ -4,9 +4,11 @@ const common = require('../common'); const assert = require('assert'); const initHooks = require('./init-hooks'); const { checkInvocations } = require('./hook-checks'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) { common.skip('Worker bootstrapping works differently -> different async IDs'); +} const p = new Promise(common.mustCall(function executor(resolve) { resolve(5); diff --git a/test/async-hooks/test-promise.js b/test/async-hooks/test-promise.js index 417cb3c80d6298..554c3ae7dd711e 100644 --- a/test/async-hooks/test-promise.js +++ b/test/async-hooks/test-promise.js @@ -5,9 +5,11 @@ const common = require('../common'); const assert = require('assert'); const initHooks = require('./init-hooks'); const { checkInvocations } = require('./hook-checks'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) { common.skip('Worker bootstrapping works differently -> different async IDs'); +} const hooks = initHooks(); diff --git a/test/async-hooks/test-signalwrap.js b/test/async-hooks/test-signalwrap.js index 4584d140ce1d0f..60adaedd476f27 100644 --- a/test/async-hooks/test-signalwrap.js +++ b/test/async-hooks/test-signalwrap.js @@ -1,10 +1,13 @@ 'use strict'; const common = require('../common'); -if (common.isWindows) +if (common.isWindows) { common.skip('no signals in Windows'); -if (!common.isMainThread) +} +const { isMainThread } = require('worker_threads'); +if (!isMainThread) { common.skip('No signal handling available in Workers'); +} const assert = require('assert'); const initHooks = require('./init-hooks'); diff --git a/test/async-hooks/test-statwatcher.js b/test/async-hooks/test-statwatcher.js index f3c0e74355eeba..8f4fb2175885f3 100644 --- a/test/async-hooks/test-statwatcher.js +++ b/test/async-hooks/test-statwatcher.js @@ -7,8 +7,11 @@ const initHooks = require('./init-hooks'); const { checkInvocations } = require('./hook-checks'); const fs = require('fs'); -if (!common.isMainThread) +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { common.skip('Worker bootstrapping works differently -> different async IDs'); +} tmpdir.refresh(); diff --git a/test/async-hooks/test-unhandled-rejection-context.js b/test/async-hooks/test-unhandled-rejection-context.js index 8404cf71f0db6f..168b51a3331f7f 100644 --- a/test/async-hooks/test-unhandled-rejection-context.js +++ b/test/async-hooks/test-unhandled-rejection-context.js @@ -5,9 +5,11 @@ const common = require('../common'); const assert = require('assert'); const initHooks = require('./init-hooks'); const async_hooks = 
require('async_hooks'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) { common.skip('Worker bootstrapping works differently -> different async IDs'); +} const promiseAsyncIds = []; const hooks = initHooks({ diff --git a/test/benchmark/test-benchmark-napi.js b/test/benchmark/test-benchmark-napi.js index 7164efe3d4e718..518e10a5111a5b 100644 --- a/test/benchmark/test-benchmark-napi.js +++ b/test/benchmark/test-benchmark-napi.js @@ -6,7 +6,9 @@ if (common.isWindows) { common.skip('vcbuild.bat doesn\'t build the n-api benchmarks yet'); } -if (!common.isMainThread) { +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { common.skip('addons are not supported in workers'); } diff --git a/test/common/README.md b/test/common/README.md index c3c44e32b3788c..887dee2783ad72 100644 --- a/test/common/README.md +++ b/test/common/README.md @@ -102,10 +102,6 @@ symlinks ([SeCreateSymbolicLinkPrivilege](https://msdn.microsoft.com/en-us/library/windows/desktop/bb530716\(v=vs.85\).aspx)). On non-Windows platforms, this always returns `true`. -### `createZeroFilledFile(filename)` - -Creates a 10 MiB file of all null characters. - ### `enoughTestMem` * [\][] @@ -257,10 +253,6 @@ Platform check for Advanced Interactive eXecutive (AIX). Attempts to 'kill' `pid` -### `isDumbTerminal` - -* [\][] - ### `isFreeBSD` * [\][] @@ -456,10 +448,6 @@ will not be run. Logs '1..0 # Skipped: ' + `msg` and exits with exit code `0`. -### `skipIfDumbTerminal()` - -Skip the rest of the tests if the current terminal is a dumb terminal - ### `skipIfEslintMissing()` Skip the rest of the tests in the current file when `ESLint` is not available @@ -475,11 +463,6 @@ was disabled at compile time. Skip the rest of the tests in the current file when the Node.js executable was compiled with a pointer size smaller than 64 bits. -### `skipIfWorker()` - -Skip the rest of the tests in the current file when not running on a main -thread. - ## ArrayStream module The `ArrayStream` module provides a simple `Stream` that pushes elements from diff --git a/test/common/index.js b/test/common/index.js index 238d66e96fe257..6086d584f0b595 100644 --- a/test/common/index.js +++ b/test/common/index.js @@ -139,8 +139,6 @@ const isPi = (() => { } })(); -const isDumbTerminal = process.env.TERM === 'dumb'; - // When using high concurrency or in the CI we need much more time for each connection attempt net.setDefaultAutoSelectFamilyAttemptTimeout(platformTimeout(net.getDefaultAutoSelectFamilyAttemptTimeout() * 10)); const defaultAutoSelectFamilyAttemptTimeout = net.getDefaultAutoSelectFamilyAttemptTimeout(); @@ -243,13 +241,6 @@ function childShouldThrowAndAbort() { }); } -function createZeroFilledFile(filename) { - const fd = fs.openSync(filename, 'w'); - fs.ftruncateSync(fd, 10 * 1024 * 1024); - fs.closeSync(fd); -} - - const pwdCommand = isWindows ? 
['cmd.exe', ['/d', '/c', 'cd']] : ['pwd', []]; @@ -716,12 +707,6 @@ function skipIf32Bits() { } } -function skipIfWorker() { - if (!isMainThread) { - skip('This test only works on a main thread'); - } -} - function getArrayBufferViews(buf) { const { buffer, byteOffset, byteLength } = buf; @@ -806,12 +791,6 @@ function invalidArgTypeHelper(input) { return ` Received type ${typeof input} (${inspected})`; } -function skipIfDumbTerminal() { - if (isDumbTerminal) { - skip('skipping - dumb terminal'); - } -} - function requireNoPackageJSONAbove(dir = __dirname) { let possiblePackage = path.join(dir, '..', 'package.json'); let lastPackage = null; @@ -882,45 +861,6 @@ function escapePOSIXShell(cmdParts, ...args) { return [cmd, { env }]; }; -function getPrintedStackTrace(stderr) { - const lines = stderr.split('\n'); - - let state = 'initial'; - const result = { - message: [], - nativeStack: [], - jsStack: [], - }; - for (let i = 0; i < lines.length; ++i) { - const line = lines[i].trim(); - if (line.length === 0) { - continue; // Skip empty lines. - } - - switch (state) { - case 'initial': - result.message.push(line); - if (line.includes('Native stack trace')) { - state = 'native-stack'; - } else { - result.message.push(line); - } - break; - case 'native-stack': - if (line.includes('JavaScript stack trace')) { - state = 'js-stack'; - } else { - result.nativeStack.push(line); - } - break; - case 'js-stack': - result.jsStack.push(line); - break; - } - } - return result; -} - /** * Check the exports of require(esm). * TODO(joyeecheung): use it in all the test-require-module-* tests to minimize changes @@ -943,7 +883,6 @@ const common = { buildType, canCreateSymLink, childShouldThrowAndAbort, - createZeroFilledFile, defaultAutoSelectFamilyAttemptTimeout, escapePOSIXShell, expectsError, @@ -951,7 +890,6 @@ const common = { expectWarning, getArrayBufferViews, getBufferSources, - getPrintedStackTrace, getTTYfd, hasIntl, hasCrypto, @@ -960,10 +898,8 @@ const common = { isAlive, isASan, isDebug, - isDumbTerminal, isFreeBSD, isLinux, - isMainThread, isOpenBSD, isMacOS, isPi, @@ -985,10 +921,8 @@ const common = { runWithInvalidFD, skip, skipIf32Bits, - skipIfDumbTerminal, skipIfEslintMissing, skipIfInspectorDisabled, - skipIfWorker, spawnPromisified, get enoughTestMem() { diff --git a/test/common/index.mjs b/test/common/index.mjs index aafef1453bd78a..dd0adadcb28d38 100644 --- a/test/common/index.mjs +++ b/test/common/index.mjs @@ -8,7 +8,6 @@ const { buildType, canCreateSymLink, childShouldThrowAndAbort, - createZeroFilledFile, enoughTestMem, escapePOSIXShell, expectsError, @@ -21,12 +20,10 @@ const { hasIPv6, isAIX, isAlive, - isDumbTerminal, isFreeBSD, isIBMi, isInsideDirWithUnusualChars, isLinux, - isMainThread, isOpenBSD, isMacOS, isSunOS, @@ -45,7 +42,6 @@ const { runWithInvalidFD, skip, skipIf32Bits, - skipIfDumbTerminal, skipIfEslintMissing, skipIfInspectorDisabled, spawnPromisified, @@ -59,7 +55,6 @@ export { canCreateSymLink, childShouldThrowAndAbort, createRequire, - createZeroFilledFile, enoughTestMem, escapePOSIXShell, expectsError, @@ -73,12 +68,10 @@ export { hasIPv6, isAIX, isAlive, - isDumbTerminal, isFreeBSD, isIBMi, isInsideDirWithUnusualChars, isLinux, - isMainThread, isOpenBSD, isMacOS, isSunOS, @@ -97,7 +90,6 @@ export { runWithInvalidFD, skip, skipIf32Bits, - skipIfDumbTerminal, skipIfEslintMissing, skipIfInspectorDisabled, spawnPromisified, diff --git a/test/es-module/test-esm-resolve-type.mjs b/test/es-module/test-esm-resolve-type.mjs index 22163bbd5defb8..9d97413379ad3c 100644 --- 
a/test/es-module/test-esm-resolve-type.mjs +++ b/test/es-module/test-esm-resolve-type.mjs @@ -13,8 +13,10 @@ import path from 'path'; import fs from 'fs'; import url from 'url'; import process from 'process'; +import { isMainThread } from 'worker_threads'; -if (!common.isMainThread) { + +if (!isMainThread) { common.skip( 'test-esm-resolve-type.mjs: process.chdir is not available in Workers' ); diff --git a/test/es-module/test-vm-main-context-default-loader.js b/test/es-module/test-vm-main-context-default-loader.js index f9edc761465d96..bda954be6ebf97 100644 --- a/test/es-module/test-vm-main-context-default-loader.js +++ b/test/es-module/test-vm-main-context-default-loader.js @@ -3,7 +3,11 @@ const common = require('../common'); // Can't process.chdir() in worker. -common.skipIfWorker(); +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} const tmpdir = require('../common/tmpdir'); const fixtures = require('../common/fixtures'); diff --git a/test/fixtures/permission/fs-write.js b/test/fixtures/permission/fs-write.js index 0c0ec72602041a..83fe3d234db290 100644 --- a/test/fixtures/permission/fs-write.js +++ b/test/fixtures/permission/fs-write.js @@ -1,7 +1,11 @@ 'use strict'; const common = require('../../common'); -common.skipIfWorker(); +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} const assert = require('assert'); const fs = require('fs'); @@ -553,4 +557,4 @@ const relativeProtectedFolder = process.env.RELATIVEBLOCKEDFOLDER; }, { code: 'ERR_ACCESS_DENIED', }); -} \ No newline at end of file +} diff --git a/test/fixtures/permission/processbinding.js b/test/fixtures/permission/processbinding.js index bdb958fb01b5ca..69e2fac5d7f151 100644 --- a/test/fixtures/permission/processbinding.js +++ b/test/fixtures/permission/processbinding.js @@ -1,5 +1,9 @@ const common = require('../../common'); -common.skipIfWorker(); +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} const assert = require('assert'); diff --git a/test/internet/test-trace-events-dns.js b/test/internet/test-trace-events-dns.js index c18a49bc9496c8..c5df4751374399 100644 --- a/test/internet/test-trace-events-dns.js +++ b/test/internet/test-trace-events-dns.js @@ -5,9 +5,11 @@ const cp = require('child_process'); const tmpdir = require('../common/tmpdir'); const fs = require('fs'); const util = require('util'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) { common.skip('process.chdir is not available in Workers'); +} const traceFile = 'node_trace.1.log'; diff --git a/test/parallel/test-async-hooks-disable-during-promise.js b/test/parallel/test-async-hooks-disable-during-promise.js index 6b9b53bd30f0f5..a25dae51e1f82d 100644 --- a/test/parallel/test-async-hooks-disable-during-promise.js +++ b/test/parallel/test-async-hooks-disable-during-promise.js @@ -1,9 +1,11 @@ 'use strict'; const common = require('../common'); const async_hooks = require('async_hooks'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) { common.skip('Worker bootstrapping works differently -> different AsyncWraps'); +} const hook = async_hooks.createHook({ init: common.mustCall(2), diff --git a/test/parallel/test-async-hooks-promise-triggerid.js b/test/parallel/test-async-hooks-promise-triggerid.js index 
b860d60999e1ef..89e5bc1464f8d5 100644 --- a/test/parallel/test-async-hooks-promise-triggerid.js +++ b/test/parallel/test-async-hooks-promise-triggerid.js @@ -2,9 +2,11 @@ const common = require('../common'); const assert = require('assert'); const async_hooks = require('async_hooks'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) { common.skip('Worker bootstrapping works differently -> different async IDs'); +} const promiseAsyncIds = []; diff --git a/test/parallel/test-async-hooks-promise.js b/test/parallel/test-async-hooks-promise.js index 9db510e329ffad..74f72a188240a0 100644 --- a/test/parallel/test-async-hooks-promise.js +++ b/test/parallel/test-async-hooks-promise.js @@ -2,9 +2,11 @@ const common = require('../common'); const assert = require('assert'); const async_hooks = require('async_hooks'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) { common.skip('Worker bootstrapping works differently -> different async IDs'); +} const initCalls = []; const resolveCalls = []; diff --git a/test/parallel/test-async-hooks-top-level-clearimmediate.js b/test/parallel/test-async-hooks-top-level-clearimmediate.js index cc5fcf48eb50b3..fd91fefa9c4bce 100644 --- a/test/parallel/test-async-hooks-top-level-clearimmediate.js +++ b/test/parallel/test-async-hooks-top-level-clearimmediate.js @@ -5,9 +5,11 @@ const common = require('../common'); const assert = require('assert'); const async_hooks = require('async_hooks'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) { common.skip('Worker bootstrapping works differently -> different async IDs'); +} let seenId, seenResource; diff --git a/test/parallel/test-async-wrap-promise-after-enabled.js b/test/parallel/test-async-wrap-promise-after-enabled.js index 0d58cbd653868b..cbca873574c1f8 100644 --- a/test/parallel/test-async-wrap-promise-after-enabled.js +++ b/test/parallel/test-async-wrap-promise-after-enabled.js @@ -4,9 +4,11 @@ const common = require('../common'); const assert = require('assert'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) { common.skip('Worker bootstrapping works differently -> different timing'); +} const async_hooks = require('async_hooks'); diff --git a/test/parallel/test-bootstrap-modules.js b/test/parallel/test-bootstrap-modules.js index c0ba01d3891477..4b02961e63ef9f 100644 --- a/test/parallel/test-bootstrap-modules.js +++ b/test/parallel/test-bootstrap-modules.js @@ -117,7 +117,9 @@ expected.atRunTime = new Set([ 'NativeModule internal/modules/esm/utils', ]); -if (common.isMainThread) { +const { isMainThread } = require('worker_threads'); + +if (isMainThread) { [ 'NativeModule url', ].forEach(expected.beforePreExec.add.bind(expected.beforePreExec)); @@ -188,7 +190,7 @@ function err(message) { } } -if (common.isMainThread) { +if (isMainThread) { const missing = expected.beforePreExec.difference(actual.beforePreExec); const extra = actual.beforePreExec.difference(expected.beforePreExec); if (missing.size !== 0) { @@ -214,10 +216,10 @@ if (common.isMainThread) { } } -if (!common.isMainThread) { +if (!isMainThread) { // For workers, just merge beforePreExec into atRunTime for now. // When we start adding modules to the worker snapshot, this branch - // can be removed and we can just remove the common.isMainThread + // can be removed and we can just remove the isMainThread // conditions. 
expected.beforePreExec.forEach(expected.atRunTime.add.bind(expected.atRunTime)); actual.beforePreExec.forEach(actual.atRunTime.add.bind(actual.atRunTime)); diff --git a/test/parallel/test-child-process-validate-stdio.js b/test/parallel/test-child-process-validate-stdio.js index d5958c694ff6ff..5ba6f0fd123cc1 100644 --- a/test/parallel/test-child-process-validate-stdio.js +++ b/test/parallel/test-child-process-validate-stdio.js @@ -43,7 +43,9 @@ assert.throws(() => getValidStdio(stdio2, true), assert.throws(() => getValidStdio(stdio), expectedError); } -if (common.isMainThread) { +const { isMainThread } = require('worker_threads'); + +if (isMainThread) { const stdio3 = [process.stdin, process.stdout, process.stderr]; const result = getValidStdio(stdio3, false); assert.deepStrictEqual(result, { diff --git a/test/parallel/test-cluster-net-listen-relative-path.js b/test/parallel/test-cluster-net-listen-relative-path.js index bb4d0b90f203e6..16d2bf5c836b53 100644 --- a/test/parallel/test-cluster-net-listen-relative-path.js +++ b/test/parallel/test-cluster-net-listen-relative-path.js @@ -1,11 +1,16 @@ 'use strict'; const common = require('../common'); -if (common.isWindows) +if (common.isWindows) { common.skip('On Windows named pipes live in their own ' + 'filesystem and don\'t have a ~100 byte limit'); -if (!common.isMainThread) +} + +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { common.skip('process.chdir is not available in Workers'); +} const assert = require('assert'); const cluster = require('cluster'); diff --git a/test/parallel/test-code-cache.js b/test/parallel/test-code-cache.js index 1c768d664c8a18..576f713af1b02a 100644 --- a/test/parallel/test-code-cache.js +++ b/test/parallel/test-code-cache.js @@ -5,7 +5,8 @@ // and the cache is used when built in modules are compiled. // Otherwise, verifies that no cache is used when compiling builtins. 
-const { isMainThread } = require('../common'); +require('../common'); +const { isMainThread } = require('worker_threads'); const assert = require('assert'); const { internalBinding diff --git a/test/parallel/test-console-clear.js b/test/parallel/test-console-clear.js index 5975602547922a..8ded51595f654e 100644 --- a/test/parallel/test-console-clear.js +++ b/test/parallel/test-console-clear.js @@ -1,6 +1,6 @@ 'use strict'; -const common = require('../common'); +require('../common'); const assert = require('assert'); const stdoutWrite = process.stdout.write; @@ -18,7 +18,7 @@ function doTest(isTTY, check) { } // Fake TTY -if (!common.isDumbTerminal) { +if (process.env.TERM !== 'dumb') { doTest(true, check); } doTest(false, ''); diff --git a/test/parallel/test-console.js b/test/parallel/test-console.js index 85b46a7c992552..e25ac7178722c0 100644 --- a/test/parallel/test-console.js +++ b/test/parallel/test-console.js @@ -31,10 +31,12 @@ const { restoreStderr } = require('../common/hijackstdio'); +const { isMainThread } = require('worker_threads'); + assert.ok(process.stdout.writable); assert.ok(process.stderr.writable); // Support legacy API -if (common.isMainThread) { +if (isMainThread) { assert.strictEqual(typeof process.stdout.fd, 'number'); assert.strictEqual(typeof process.stderr.fd, 'number'); } diff --git a/test/parallel/test-crypto-no-algorithm.js b/test/parallel/test-crypto-no-algorithm.js index 06124e3d465e41..bb5b81e119c87d 100644 --- a/test/parallel/test-crypto-no-algorithm.js +++ b/test/parallel/test-crypto-no-algorithm.js @@ -11,8 +11,9 @@ if (!hasOpenSSL3) const assert = require('node:assert/strict'); const crypto = require('node:crypto'); +const { isMainThread } = require('worker_threads'); -if (common.isMainThread) { +if (isMainThread) { // TODO(richardlau): Decide if `crypto.setFips` should error if the // provider named "fips" is not available. crypto.setFips(1); diff --git a/test/parallel/test-cwd-enoent-preload.js b/test/parallel/test-cwd-enoent-preload.js index 21b20d6d035672..a7841e984d0eab 100644 --- a/test/parallel/test-cwd-enoent-preload.js +++ b/test/parallel/test-cwd-enoent-preload.js @@ -1,10 +1,15 @@ 'use strict'; const common = require('../common'); // Fails with EINVAL on SmartOS, EBUSY on Windows, EBUSY on AIX. -if (common.isSunOS || common.isWindows || common.isAIX || common.isIBMi) +if (common.isSunOS || common.isWindows || common.isAIX || common.isIBMi) { common.skip('cannot rmdir current working directory'); -if (!common.isMainThread) +} + +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { common.skip('process.chdir is not available in Workers'); +} const assert = require('assert'); const fs = require('fs'); diff --git a/test/parallel/test-cwd-enoent-repl.js b/test/parallel/test-cwd-enoent-repl.js index 0a61cbfbced9b4..fcb08c004f345c 100644 --- a/test/parallel/test-cwd-enoent-repl.js +++ b/test/parallel/test-cwd-enoent-repl.js @@ -1,10 +1,15 @@ 'use strict'; const common = require('../common'); // Fails with EINVAL on SmartOS, EBUSY on Windows, EBUSY on AIX. 
-if (common.isSunOS || common.isWindows || common.isAIX || common.isIBMi) +if (common.isSunOS || common.isWindows || common.isAIX || common.isIBMi) { common.skip('cannot rmdir current working directory'); -if (!common.isMainThread) +} + +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { common.skip('process.chdir is not available in Workers'); +} const assert = require('assert'); const fs = require('fs'); diff --git a/test/parallel/test-cwd-enoent.js b/test/parallel/test-cwd-enoent.js index 876888bc2be518..ca8b460835d45a 100644 --- a/test/parallel/test-cwd-enoent.js +++ b/test/parallel/test-cwd-enoent.js @@ -1,10 +1,15 @@ 'use strict'; const common = require('../common'); // Fails with EINVAL on SmartOS, EBUSY on Windows, EBUSY on AIX. -if (common.isSunOS || common.isWindows || common.isAIX || common.isIBMi) +if (common.isSunOS || common.isWindows || common.isAIX || common.isIBMi) { common.skip('cannot rmdir current working directory'); -if (!common.isMainThread) +} + +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { common.skip('process.chdir is not available in Workers'); +} const assert = require('assert'); const fs = require('fs'); diff --git a/test/parallel/test-fs-mkdir.js b/test/parallel/test-fs-mkdir.js index 89b8b436d5c9f4..f7685c7de0a962 100644 --- a/test/parallel/test-fs-mkdir.js +++ b/test/parallel/test-fs-mkdir.js @@ -24,6 +24,7 @@ const common = require('../common'); const assert = require('assert'); const fs = require('fs'); const path = require('path'); +const { isMainThread } = require('worker_threads'); const tmpdir = require('../common/tmpdir'); tmpdir.refresh(); @@ -217,7 +218,7 @@ function nextdir() { // mkdirpSync dirname loop // XXX: windows and smartos have issues removing a directory that you're in. 
-if (common.isMainThread && (common.isLinux || common.isMacOS)) { +if (isMainThread && (common.isLinux || common.isMacOS)) { const pathname = tmpdir.resolve(nextdir()); fs.mkdirSync(pathname); process.chdir(pathname); diff --git a/test/parallel/test-fs-realpath.js b/test/parallel/test-fs-realpath.js index d944195de3de0c..69237e3974e5b0 100644 --- a/test/parallel/test-fs-realpath.js +++ b/test/parallel/test-fs-realpath.js @@ -23,9 +23,11 @@ const common = require('../common'); const fixtures = require('../common/fixtures'); const tmpdir = require('../common/tmpdir'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) { common.skip('process.chdir is not available in Workers'); +} const assert = require('assert'); const fs = require('fs'); diff --git a/test/parallel/test-fs-whatwg-url.js b/test/parallel/test-fs-whatwg-url.js index 7401ed7e76ecd1..2d5664cd12015c 100644 --- a/test/parallel/test-fs-whatwg-url.js +++ b/test/parallel/test-fs-whatwg-url.js @@ -5,6 +5,8 @@ const fixtures = require('../common/fixtures'); const assert = require('assert'); const fs = require('fs'); const tmpdir = require('../common/tmpdir'); +const { isMainThread } = require('worker_threads'); + tmpdir.refresh(); const url = fixtures.fileURL('a.js'); @@ -86,7 +88,7 @@ if (common.isWindows) { // Test that strings are interpreted as paths and not as URL // Can't use process.chdir in Workers // Please avoid testing fs.rmdir('file:') or using it as cleanup -if (common.isMainThread && !common.isWindows) { +if (isMainThread && !common.isWindows) { const oldCwd = process.cwd(); process.chdir(tmpdir.path); diff --git a/test/parallel/test-fs-write-file-sync.js b/test/parallel/test-fs-write-file-sync.js index 4ead91530bb748..e5fbe32eab6d14 100644 --- a/test/parallel/test-fs-write-file-sync.js +++ b/test/parallel/test-fs-write-file-sync.js @@ -21,9 +21,11 @@ 'use strict'; const common = require('../common'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) { common.skip('Setting process.umask is not supported in Workers'); +} const assert = require('assert'); const fs = require('fs'); diff --git a/test/parallel/test-http-chunk-problem.js b/test/parallel/test-http-chunk-problem.js index 3629b7576600e8..90c54b8f5c7dcb 100644 --- a/test/parallel/test-http-chunk-problem.js +++ b/test/parallel/test-http-chunk-problem.js @@ -1,9 +1,12 @@ 'use strict'; // http://groups.google.com/group/nodejs/browse_thread/thread/f66cd3c960406919 const common = require('../common'); -if (!common.hasCrypto) + +if (!common.hasCrypto) { common.skip('missing crypto'); +} +const fs = require('fs'); const assert = require('assert'); if (process.argv[2] === 'request') { @@ -73,7 +76,11 @@ function executeRequest(cb) { tmpdir.refresh(); -common.createZeroFilledFile(filename); + +// Create a zero-filled file. 
+const fd = fs.openSync(filename, 'w'); +fs.ftruncateSync(fd, 10 * 1024 * 1024); +fs.closeSync(fd); server = http.createServer(function(req, res) { res.writeHead(200); diff --git a/test/parallel/test-icu-env.js b/test/parallel/test-icu-env.js index afa36132f60e8d..26075a3d0acec2 100644 --- a/test/parallel/test-icu-env.js +++ b/test/parallel/test-icu-env.js @@ -4,7 +4,7 @@ const assert = require('assert'); const { execFileSync } = require('child_process'); const { readFileSync, globSync } = require('fs'); const { path } = require('../common/fixtures'); - +const { isMainThread } = require('worker_threads'); // This test checks for regressions in environment variable handling and // caching, but the localization data originated from ICU might change @@ -169,7 +169,7 @@ if (isMockable) { // Tests with process.env mutated inside { // process.env.TZ is not intercepted in Workers - if (common.isMainThread) { + if (isMainThread) { assert.strictEqual( isSet(zones.map((TZ) => runEnvInside({ TZ }, () => new Date(333333333333).toString()))), true diff --git a/test/parallel/test-inspector-already-activated-cli.js b/test/parallel/test-inspector-already-activated-cli.js index ba76d5168c14b9..9de226cedca60c 100644 --- a/test/parallel/test-inspector-already-activated-cli.js +++ b/test/parallel/test-inspector-already-activated-cli.js @@ -3,7 +3,12 @@ const common = require('../common'); common.skipIfInspectorDisabled(); -common.skipIfWorker(); + +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} const assert = require('assert'); const inspector = require('inspector'); diff --git a/test/parallel/test-inspector-async-hook-after-done.js b/test/parallel/test-inspector-async-hook-after-done.js index 9f96fdb7b0da84..b49fe32982e132 100644 --- a/test/parallel/test-inspector-async-hook-after-done.js +++ b/test/parallel/test-inspector-async-hook-after-done.js @@ -3,7 +3,12 @@ const common = require('../common'); common.skipIfInspectorDisabled(); -common.skipIfWorker(); + +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} const assert = require('assert'); const { Worker } = require('worker_threads'); diff --git a/test/parallel/test-inspector-async-hook-setup-at-signal.js b/test/parallel/test-inspector-async-hook-setup-at-signal.js index 43f50d00615723..64a3835e415746 100644 --- a/test/parallel/test-inspector-async-hook-setup-at-signal.js +++ b/test/parallel/test-inspector-async-hook-setup-at-signal.js @@ -6,7 +6,11 @@ common.skipIf32Bits(); const { NodeInstance } = require('../common/inspector-helper.js'); const assert = require('assert'); -common.skipIfWorker(); // Signal starts a server for a main thread inspector +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} const script = ` process._rawDebug('Waiting until a signal enables the inspector...'); diff --git a/test/parallel/test-inspector-connect-main-thread.js b/test/parallel/test-inspector-connect-main-thread.js index b724bf3cd9d62f..2281b5efcf3ed8 100644 --- a/test/parallel/test-inspector-connect-main-thread.js +++ b/test/parallel/test-inspector-connect-main-thread.js @@ -10,8 +10,8 @@ const { pathToFileURL } = require('url'); const { isMainThread, parentPort, Worker, workerData } = require('worker_threads'); -if (!workerData) { - common.skipIfWorker(); +if (!workerData && !isMainThread) { + common.skip('This 
test only works on a main thread'); } function toDebug() { diff --git a/test/parallel/test-inspector-connect-to-main-thread.js b/test/parallel/test-inspector-connect-to-main-thread.js index 7254145a2733f0..9244a85f21b15a 100644 --- a/test/parallel/test-inspector-connect-to-main-thread.js +++ b/test/parallel/test-inspector-connect-to-main-thread.js @@ -6,8 +6,8 @@ common.skipIfInspectorDisabled(); const { Session } = require('inspector'); const { Worker, isMainThread, workerData } = require('worker_threads'); -if (!workerData) { - common.skipIfWorker(); +if (!workerData && !isMainThread) { + common.skip('This test only works on a main thread'); } if (isMainThread) { diff --git a/test/parallel/test-inspector-contexts.js b/test/parallel/test-inspector-contexts.js index 3d6ee4d460e863..9ab2c515b4a9de 100644 --- a/test/parallel/test-inspector-contexts.js +++ b/test/parallel/test-inspector-contexts.js @@ -9,6 +9,8 @@ const assert = require('assert'); const vm = require('vm'); const { Session } = require('inspector'); const { gcUntil } = require('../common/gc'); +const { isMainThread } = require('worker_threads'); + const session = new Session(); session.connect(); @@ -34,7 +36,7 @@ async function testContextCreatedAndDestroyed() { assert.strictEqual(name.includes(`[${process.pid}]`), true); } else { let expects = `${process.argv0}[${process.pid}]`; - if (!common.isMainThread) { + if (!isMainThread) { expects = `Worker[${require('worker_threads').threadId}]`; } assert.strictEqual(expects, name); diff --git a/test/parallel/test-inspector-exit-worker-in-wait-for-connection.js b/test/parallel/test-inspector-exit-worker-in-wait-for-connection.js index 4fcbb092fd23cf..9215d4969fb92f 100644 --- a/test/parallel/test-inspector-exit-worker-in-wait-for-connection.js +++ b/test/parallel/test-inspector-exit-worker-in-wait-for-connection.js @@ -3,9 +3,15 @@ const common = require('../common'); common.skipIfInspectorDisabled(); -const { parentPort, workerData, Worker } = require('node:worker_threads'); -if (!workerData) { - common.skipIfWorker(); +const { + isMainThread, + parentPort, + workerData, + Worker, +} = require('node:worker_threads'); + +if (!workerData && !isMainThread) { + common.skip('This test only works on a main thread'); } const inspector = require('node:inspector'); diff --git a/test/parallel/test-inspector-exit-worker-in-wait-for-connection2.js b/test/parallel/test-inspector-exit-worker-in-wait-for-connection2.js index fb13fc3f969304..cf485ae3a4318f 100644 --- a/test/parallel/test-inspector-exit-worker-in-wait-for-connection2.js +++ b/test/parallel/test-inspector-exit-worker-in-wait-for-connection2.js @@ -3,9 +3,9 @@ const common = require('../common'); common.skipIfInspectorDisabled(); -const { workerData, Worker } = require('node:worker_threads'); -if (!workerData) { - common.skipIfWorker(); +const { isMainThread, workerData, Worker } = require('node:worker_threads'); +if (!workerData && !isMainThread) { + common.skip('This test only works on a main thread'); } const assert = require('node:assert'); diff --git a/test/parallel/test-inspector-open-coverage.js b/test/parallel/test-inspector-open-coverage.js index 259049c36822ab..33f50bfc3f53c4 100644 --- a/test/parallel/test-inspector-open-coverage.js +++ b/test/parallel/test-inspector-open-coverage.js @@ -7,7 +7,12 @@ const fixtures = require('../common/fixtures'); const tmpdir = require('../common/tmpdir'); common.skipIfInspectorDisabled(); -common.skipIfWorker(); + +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) 
{ + common.skip('This test only works on a main thread'); +} tmpdir.refresh(); diff --git a/test/parallel/test-inspector-open-port-integer-overflow.js b/test/parallel/test-inspector-open-port-integer-overflow.js index 0f9a4799d0642a..a1b5c640c4c18d 100644 --- a/test/parallel/test-inspector-open-port-integer-overflow.js +++ b/test/parallel/test-inspector-open-port-integer-overflow.js @@ -5,7 +5,12 @@ const common = require('../common'); common.skipIfInspectorDisabled(); -common.skipIfWorker(); + +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} const assert = require('assert'); const inspector = require('inspector'); diff --git a/test/parallel/test-inspector-overwrite-config.js b/test/parallel/test-inspector-overwrite-config.js index c20df083256120..53599b31df8acc 100644 --- a/test/parallel/test-inspector-overwrite-config.js +++ b/test/parallel/test-inspector-overwrite-config.js @@ -13,9 +13,11 @@ const common = require('../common'); const assert = require('assert'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) { common.skip('--require does not work with Workers'); +} const inspector = require('inspector'); const msg = 'Test inspector logging'; diff --git a/test/parallel/test-inspector-port-zero-cluster.js b/test/parallel/test-inspector-port-zero-cluster.js index 8e2db0b69d5ca0..5ee7bcf7417345 100644 --- a/test/parallel/test-inspector-port-zero-cluster.js +++ b/test/parallel/test-inspector-port-zero-cluster.js @@ -3,7 +3,12 @@ const common = require('../common'); common.skipIfInspectorDisabled(); -common.skipIfWorker(); + +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} // Assert that even when started with `--inspect=0` workers are assigned // consecutive (i.e. 
deterministically predictable) debug ports diff --git a/test/parallel/test-inspector-tracing-domain.js b/test/parallel/test-inspector-tracing-domain.js index f5ac6875a0f643..aa31d63a01577d 100644 --- a/test/parallel/test-inspector-tracing-domain.js +++ b/test/parallel/test-inspector-tracing-domain.js @@ -3,7 +3,13 @@ const common = require('../common'); common.skipIfInspectorDisabled(); -common.skipIfWorker(); // https://github.com/nodejs/node/issues/22767 + +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + // https://github.com/nodejs/node/issues/22767 + common.skip('This test only works on a main thread'); +} const assert = require('assert'); const { Session } = require('inspector'); diff --git a/test/parallel/test-inspector-workers-flat-list.js b/test/parallel/test-inspector-workers-flat-list.js index 9f6495d10fb147..a7b57fbb0a353b 100644 --- a/test/parallel/test-inspector-workers-flat-list.js +++ b/test/parallel/test-inspector-workers-flat-list.js @@ -6,8 +6,8 @@ common.skipIfInspectorDisabled(); const { Worker, isMainThread, parentPort, workerData } = require('worker_threads'); -if (isMainThread || workerData !== 'launched by test') { - common.skipIfWorker(); +if (!isMainThread || workerData !== 'launched by test') { + common.skip('This test only works on a main thread'); } const { Session } = require('inspector'); diff --git a/test/parallel/test-internal-module-require.js b/test/parallel/test-internal-module-require.js index 058273c7ea4304..213838150b96d9 100644 --- a/test/parallel/test-internal-module-require.js +++ b/test/parallel/test-internal-module-require.js @@ -8,8 +8,9 @@ // 3. Deprecated modules are properly deprecated. const common = require('../common'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) { +if (!isMainThread) { common.skip('Cannot test the existence of --expose-internals from worker'); } diff --git a/test/parallel/test-performance-nodetiming-uvmetricsinfo.js b/test/parallel/test-performance-nodetiming-uvmetricsinfo.js index 3d32e0deb72e94..b67682b0ff3559 100644 --- a/test/parallel/test-performance-nodetiming-uvmetricsinfo.js +++ b/test/parallel/test-performance-nodetiming-uvmetricsinfo.js @@ -1,7 +1,11 @@ 'use strict'; const common = require('../common'); -common.skipIfWorker(); +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} const { spawnSync } = require('node:child_process'); const assert = require('node:assert'); diff --git a/test/parallel/test-permission-allow-addons-cli.js b/test/parallel/test-permission-allow-addons-cli.js index 484f16e0acb3b5..342bdb6bc01e35 100644 --- a/test/parallel/test-permission-allow-addons-cli.js +++ b/test/parallel/test-permission-allow-addons-cli.js @@ -2,7 +2,11 @@ 'use strict'; const common = require('../common'); -common.skipIfWorker(); +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} const { createRequire } = require('node:module'); const assert = require('node:assert'); diff --git a/test/parallel/test-permission-allow-child-process-cli.js b/test/parallel/test-permission-allow-child-process-cli.js index 794f55ecf9a68c..cf7e79e208d389 100644 --- a/test/parallel/test-permission-allow-child-process-cli.js +++ b/test/parallel/test-permission-allow-child-process-cli.js @@ -2,7 +2,13 @@ 'use strict'; const common = require('../common'); -common.skipIfWorker(); + +const 
{ isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} + const assert = require('assert'); const childProcess = require('child_process'); const fs = require('fs'); diff --git a/test/parallel/test-permission-allow-wasi-cli.js b/test/parallel/test-permission-allow-wasi-cli.js index c6bea9fb39cf0a..20aca9292533d5 100644 --- a/test/parallel/test-permission-allow-wasi-cli.js +++ b/test/parallel/test-permission-allow-wasi-cli.js @@ -2,7 +2,11 @@ 'use strict'; const common = require('../common'); -common.skipIfWorker(); +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} const assert = require('assert'); const { WASI } = require('wasi'); diff --git a/test/parallel/test-permission-child-process-cli.js b/test/parallel/test-permission-child-process-cli.js index dfea008a60407b..7d8fbf0564d5ef 100644 --- a/test/parallel/test-permission-child-process-cli.js +++ b/test/parallel/test-permission-child-process-cli.js @@ -2,7 +2,12 @@ 'use strict'; const common = require('../common'); -common.skipIfWorker(); +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} + const assert = require('assert'); const childProcess = require('child_process'); diff --git a/test/parallel/test-permission-fs-absolute-path.js b/test/parallel/test-permission-fs-absolute-path.js index 2c2257052c8b02..c3bf9ef5cfb2d1 100644 --- a/test/parallel/test-permission-fs-absolute-path.js +++ b/test/parallel/test-permission-fs-absolute-path.js @@ -3,7 +3,11 @@ const common = require('../common'); const path = require('path'); -common.skipIfWorker(); +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} const assert = require('assert'); const { spawnSync } = require('child_process'); diff --git a/test/parallel/test-permission-fs-internal-module-stat.js b/test/parallel/test-permission-fs-internal-module-stat.js index fd0222cc34fa2e..ef99e4cca73a4f 100644 --- a/test/parallel/test-permission-fs-internal-module-stat.js +++ b/test/parallel/test-permission-fs-internal-module-stat.js @@ -2,7 +2,11 @@ 'use strict'; const common = require('../common'); -common.skipIfWorker(); +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} if (!common.hasCrypto) { common.skip('no crypto'); diff --git a/test/parallel/test-permission-fs-read.js b/test/parallel/test-permission-fs-read.js index ed8e866a6a4c10..b719207bdbd820 100644 --- a/test/parallel/test-permission-fs-read.js +++ b/test/parallel/test-permission-fs-read.js @@ -2,7 +2,11 @@ 'use strict'; const common = require('../common'); -common.skipIfWorker(); +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} if (!common.hasCrypto) { common.skip('no crypto'); diff --git a/test/parallel/test-permission-fs-relative-path.js b/test/parallel/test-permission-fs-relative-path.js index 3b115ee35d1227..9f4ce25f0f7d37 100644 --- a/test/parallel/test-permission-fs-relative-path.js +++ b/test/parallel/test-permission-fs-relative-path.js @@ -2,7 +2,11 @@ 'use strict'; const common = require('../common'); -common.skipIfWorker(); +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a 
main thread'); +} const assert = require('assert'); const { spawnSync } = require('child_process'); diff --git a/test/parallel/test-permission-fs-repeat-path.js b/test/parallel/test-permission-fs-repeat-path.js index 764c7d91497248..d24197e905063d 100644 --- a/test/parallel/test-permission-fs-repeat-path.js +++ b/test/parallel/test-permission-fs-repeat-path.js @@ -3,7 +3,11 @@ const common = require('../common'); const path = require('path'); -common.skipIfWorker(); +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} const assert = require('assert'); const { spawnSync } = require('child_process'); diff --git a/test/parallel/test-permission-fs-require.js b/test/parallel/test-permission-fs-require.js index 5d3a407708371e..8406f9ec052eae 100644 --- a/test/parallel/test-permission-fs-require.js +++ b/test/parallel/test-permission-fs-require.js @@ -2,7 +2,13 @@ 'use strict'; const common = require('../common'); -common.skipIfWorker(); + +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} + const fixtures = require('../common/fixtures'); const assert = require('node:assert'); diff --git a/test/parallel/test-permission-fs-symlink-relative.js b/test/parallel/test-permission-fs-symlink-relative.js index cf9b37ea79b059..e1fe5d064a8756 100644 --- a/test/parallel/test-permission-fs-symlink-relative.js +++ b/test/parallel/test-permission-fs-symlink-relative.js @@ -2,7 +2,12 @@ 'use strict'; const common = require('../common'); -common.skipIfWorker(); + +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} const assert = require('assert'); const path = require('path'); diff --git a/test/parallel/test-permission-fs-symlink-target-write.js b/test/parallel/test-permission-fs-symlink-target-write.js index f55b19fa764a89..1cffead4dd7e71 100644 --- a/test/parallel/test-permission-fs-symlink-target-write.js +++ b/test/parallel/test-permission-fs-symlink-target-write.js @@ -2,11 +2,19 @@ 'use strict'; const common = require('../common'); -common.skipIfWorker(); -if (!common.canCreateSymLink()) +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} + +if (!common.canCreateSymLink()) { common.skip('insufficient privileges'); -if (!common.hasCrypto) +} + +if (!common.hasCrypto) { common.skip('no crypto'); +} const assert = require('assert'); const fs = require('fs'); @@ -15,9 +23,7 @@ const tmpdir = require('../common/tmpdir'); const fixtures = require('../common/fixtures'); const { spawnSync } = require('child_process'); -{ - tmpdir.refresh(); -} +tmpdir.refresh(); const readOnlyFolder = tmpdir.resolve('read-only'); const readWriteFolder = tmpdir.resolve('read-write'); diff --git a/test/parallel/test-permission-fs-symlink.js b/test/parallel/test-permission-fs-symlink.js index 92965c960177d4..e5a80dba44ddf4 100644 --- a/test/parallel/test-permission-fs-symlink.js +++ b/test/parallel/test-permission-fs-symlink.js @@ -2,13 +2,19 @@ 'use strict'; const common = require('../common'); -common.skipIfWorker(); +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} const fixtures = require('../common/fixtures'); -if (!common.canCreateSymLink()) +if (!common.canCreateSymLink()) { common.skip('insufficient privileges'); 
-if (!common.hasCrypto) +} +if (!common.hasCrypto) { common.skip('no crypto'); +} const assert = require('assert'); const fs = require('fs'); diff --git a/test/parallel/test-permission-fs-traversal-path.js b/test/parallel/test-permission-fs-traversal-path.js index 03571c2d01c861..ed9e434b6b862b 100644 --- a/test/parallel/test-permission-fs-traversal-path.js +++ b/test/parallel/test-permission-fs-traversal-path.js @@ -2,13 +2,20 @@ 'use strict'; const common = require('../common'); -common.skipIfWorker(); +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} const fixtures = require('../common/fixtures'); -if (!common.canCreateSymLink()) +if (!common.canCreateSymLink()) { common.skip('insufficient privileges'); -if (!common.hasCrypto) +} + +if (!common.hasCrypto) { common.skip('no crypto'); +} const assert = require('assert'); const fs = require('fs'); diff --git a/test/parallel/test-permission-fs-wildcard.js b/test/parallel/test-permission-fs-wildcard.js index adca56ed0dba6d..1b67f37c2dcda2 100644 --- a/test/parallel/test-permission-fs-wildcard.js +++ b/test/parallel/test-permission-fs-wildcard.js @@ -2,7 +2,11 @@ 'use strict'; const common = require('../common'); -common.skipIfWorker(); +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} const assert = require('assert'); const path = require('path'); diff --git a/test/parallel/test-permission-fs-windows-path.js b/test/parallel/test-permission-fs-windows-path.js index 6869b347cf283f..c3b3683b6479f7 100644 --- a/test/parallel/test-permission-fs-windows-path.js +++ b/test/parallel/test-permission-fs-windows-path.js @@ -2,7 +2,11 @@ 'use strict'; const common = require('../common'); -common.skipIfWorker(); +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} const assert = require('assert'); const { spawnSync } = require('child_process'); diff --git a/test/parallel/test-permission-fs-write-report.js b/test/parallel/test-permission-fs-write-report.js index 111f73b7bcc1ed..a5f8d74904fedc 100644 --- a/test/parallel/test-permission-fs-write-report.js +++ b/test/parallel/test-permission-fs-write-report.js @@ -2,9 +2,15 @@ 'use strict'; const common = require('../common'); -common.skipIfWorker(); -if (!common.hasCrypto) +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} + +if (!common.hasCrypto) { common.skip('no crypto'); +} const assert = require('assert'); diff --git a/test/parallel/test-permission-fs-write-v8.js b/test/parallel/test-permission-fs-write-v8.js index 85cb9a5519b3af..1b8691969b7afb 100644 --- a/test/parallel/test-permission-fs-write-v8.js +++ b/test/parallel/test-permission-fs-write-v8.js @@ -2,9 +2,15 @@ 'use strict'; const common = require('../common'); -common.skipIfWorker(); -if (!common.hasCrypto) +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} + +if (!common.hasCrypto) { common.skip('no crypto'); +} const assert = require('assert'); const v8 = require('v8'); diff --git a/test/parallel/test-permission-fs-write.js b/test/parallel/test-permission-fs-write.js index 34eab7a40005db..385a37e2a92d86 100644 --- a/test/parallel/test-permission-fs-write.js +++ b/test/parallel/test-permission-fs-write.js @@ -2,9 +2,15 @@ 
'use strict'; const common = require('../common'); -common.skipIfWorker(); -if (!common.hasCrypto) +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} + +if (!common.hasCrypto) { common.skip('no crypto'); +} const assert = require('assert'); const path = require('path'); diff --git a/test/parallel/test-permission-inspector-brk.js b/test/parallel/test-permission-inspector-brk.js index 61c9c799ba7eb6..3cc7caabd42ba1 100644 --- a/test/parallel/test-permission-inspector-brk.js +++ b/test/parallel/test-permission-inspector-brk.js @@ -5,8 +5,12 @@ const assert = require('assert'); const { spawnSync } = require('child_process'); const fixtures = require('../common/fixtures'); const file = fixtures.path('permission', 'inspector-brk.js'); +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} -common.skipIfWorker(); common.skipIfInspectorDisabled(); // See https://github.com/nodejs/node/issues/53385 diff --git a/test/parallel/test-permission-inspector.js b/test/parallel/test-permission-inspector.js index 9d3bf485fc4348..4b52e12abca090 100644 --- a/test/parallel/test-permission-inspector.js +++ b/test/parallel/test-permission-inspector.js @@ -2,7 +2,12 @@ 'use strict'; const common = require('../common'); -common.skipIfWorker(); +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} + common.skipIfInspectorDisabled(); const { Session } = require('inspector'); diff --git a/test/parallel/test-permission-no-addons.js b/test/parallel/test-permission-no-addons.js index a3ae6f4be10641..df08c4aa9f9db5 100644 --- a/test/parallel/test-permission-no-addons.js +++ b/test/parallel/test-permission-no-addons.js @@ -2,7 +2,11 @@ 'use strict'; const common = require('../common'); -common.skipIfWorker(); +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} const { createRequire } = require('node:module'); const assert = require('node:assert'); diff --git a/test/parallel/test-permission-processbinding.js b/test/parallel/test-permission-processbinding.js index 47a1364f19e303..f5e33dac4deb52 100644 --- a/test/parallel/test-permission-processbinding.js +++ b/test/parallel/test-permission-processbinding.js @@ -1,7 +1,11 @@ 'use strict'; const common = require('../common'); -common.skipIfWorker(); +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} if (!common.hasCrypto) { common.skip('no crypto'); diff --git a/test/parallel/test-permission-worker-threads-cli.js b/test/parallel/test-permission-worker-threads-cli.js index efd98b2a3881aa..cf397c280474c1 100644 --- a/test/parallel/test-permission-worker-threads-cli.js +++ b/test/parallel/test-permission-worker-threads-cli.js @@ -2,13 +2,17 @@ 'use strict'; const common = require('../common'); -common.skipIfWorker(); -const assert = require('assert'); const { Worker, isMainThread, } = require('worker_threads'); +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} + +const assert = require('assert'); + // Guarantee the initial state { assert.ok(!process.permission.has('worker')); diff --git a/test/parallel/test-pipe-file-to-http.js b/test/parallel/test-pipe-file-to-http.js index 82bdbe6a832a98..ffbab21f71fd9d 100644 --- 
a/test/parallel/test-pipe-file-to-http.js +++ b/test/parallel/test-pipe-file-to-http.js @@ -54,7 +54,12 @@ const server = http.createServer((req, res) => { server.listen(0); server.on('listening', () => { - common.createZeroFilledFile(filename); + + // Create a zero-filled file + const fd = fs.openSync(filename, 'w'); + fs.ftruncateSync(fd, 10 * 1024 * 1024); + fs.closeSync(fd); + makeRequest(); }); diff --git a/test/parallel/test-preload-self-referential.js b/test/parallel/test-preload-self-referential.js index 867e1c67983c83..68681332978ea6 100644 --- a/test/parallel/test-preload-self-referential.js +++ b/test/parallel/test-preload-self-referential.js @@ -4,11 +4,13 @@ const common = require('../common'); const fixtures = require('../common/fixtures'); const assert = require('assert'); const { exec } = require('child_process'); +const { isMainThread } = require('worker_threads'); const nodeBinary = process.argv[0]; -if (!common.isMainThread) +if (!isMainThread) { common.skip('process.chdir is not available in Workers'); +} const selfRefModule = fixtures.path('self_ref_module'); const fixtureA = fixtures.path('printA.js'); diff --git a/test/parallel/test-process-abort.js b/test/parallel/test-process-abort.js index 665e1399a3f362..34353befb02a44 100644 --- a/test/parallel/test-process-abort.js +++ b/test/parallel/test-process-abort.js @@ -2,9 +2,11 @@ const common = require('../common'); const assert = require('assert'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) { common.skip('process.abort() is not available in Workers'); +} // Check that our built-in methods do not have a prototype/constructor behaviour // if they don't need to. This could be tested for any of our C++ methods. diff --git a/test/parallel/test-process-beforeexit-throw-exit.js b/test/parallel/test-process-beforeexit-throw-exit.js index 6e9d764be90baa..c967d3a62712a7 100644 --- a/test/parallel/test-process-beforeexit-throw-exit.js +++ b/test/parallel/test-process-beforeexit-throw-exit.js @@ -1,6 +1,10 @@ 'use strict'; const common = require('../common'); -common.skipIfWorker(); +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} // Test that 'exit' is emitted if 'beforeExit' throws. 
diff --git a/test/parallel/test-process-chdir-errormessage.js b/test/parallel/test-process-chdir-errormessage.js index 0ed368287b377e..727a13f6f63f16 100644 --- a/test/parallel/test-process-chdir-errormessage.js +++ b/test/parallel/test-process-chdir-errormessage.js @@ -1,8 +1,11 @@ 'use strict'; const common = require('../common'); -if (!common.isMainThread) +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { common.skip('process.chdir is not available in Workers'); +} const assert = require('assert'); assert.throws( diff --git a/test/parallel/test-process-chdir.js b/test/parallel/test-process-chdir.js index ee59df853b24ce..42d2a60c8ec63e 100644 --- a/test/parallel/test-process-chdir.js +++ b/test/parallel/test-process-chdir.js @@ -4,9 +4,11 @@ const common = require('../common'); const assert = require('assert'); const fs = require('fs'); const path = require('path'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) { common.skip('process.chdir is not available in Workers'); +} const tmpdir = require('../common/tmpdir'); diff --git a/test/parallel/test-process-env-tz.js b/test/parallel/test-process-env-tz.js index dcc69ed4bf1d3b..b7bf730a9afa38 100644 --- a/test/parallel/test-process-env-tz.js +++ b/test/parallel/test-process-env-tz.js @@ -1,12 +1,15 @@ 'use strict'; const common = require('../common'); const assert = require('assert'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) { common.skip('process.env.TZ is not intercepted in Workers'); +} -if (common.isWindows) // Using a different TZ format. +if (common.isWindows) { // Using a different TZ format. common.skip('todo: test on Windows'); +} const date = new Date('2018-04-14T12:34:56.789Z'); diff --git a/test/parallel/test-process-euid-egid.js b/test/parallel/test-process-euid-egid.js index 11a8cfa0ed2b3c..3f4934233a6308 100644 --- a/test/parallel/test-process-euid-egid.js +++ b/test/parallel/test-process-euid-egid.js @@ -3,6 +3,8 @@ const common = require('../common'); const assert = require('assert'); +const { isMainThread } = require('worker_threads'); + if (common.isWindows) { assert.strictEqual(process.geteuid, undefined); assert.strictEqual(process.getegid, undefined); @@ -11,8 +13,9 @@ if (common.isWindows) { return; } -if (!common.isMainThread) +if (!isMainThread) { return; +} assert.throws(() => { process.seteuid({}); diff --git a/test/parallel/test-process-exit-handler.js b/test/parallel/test-process-exit-handler.js index d74e320fe63082..2546aa60a5cf89 100644 --- a/test/parallel/test-process-exit-handler.js +++ b/test/parallel/test-process-exit-handler.js @@ -1,8 +1,10 @@ 'use strict'; const common = require('../common'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) { common.skip('execArgv does not affect Workers'); +} // This test ensures that no asynchronous operations are performed in the 'exit' // handler. 
diff --git a/test/parallel/test-process-get-builtin.mjs b/test/parallel/test-process-get-builtin.mjs index 3cf8179f7286bb..74089c07221688 100644 --- a/test/parallel/test-process-get-builtin.mjs +++ b/test/parallel/test-process-get-builtin.mjs @@ -1,6 +1,7 @@ -import { isMainThread, hasCrypto, hasIntl } from '../common/index.mjs'; +import { hasCrypto, hasIntl } from '../common/index.mjs'; import assert from 'node:assert'; import { builtinModules } from 'node:module'; +import { isMainThread } from 'node:worker_threads'; for (const invalid of [1, undefined, null, false, [], {}, () => {}, Symbol('test')]) { assert.throws(() => process.getBuiltinModule(invalid), { code: 'ERR_INVALID_ARG_TYPE' }); diff --git a/test/parallel/test-process-initgroups.js b/test/parallel/test-process-initgroups.js index 6b4e3bdf1470b4..52597e096175e9 100644 --- a/test/parallel/test-process-initgroups.js +++ b/test/parallel/test-process-initgroups.js @@ -7,8 +7,11 @@ if (common.isWindows) { return; } -if (!common.isMainThread) +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { return; +} [undefined, null, true, {}, [], () => {}].forEach((val) => { assert.throws( diff --git a/test/parallel/test-process-load-env-file.js b/test/parallel/test-process-load-env-file.js index 1dada3aa9b7016..ec99c099d11b80 100644 --- a/test/parallel/test-process-load-env-file.js +++ b/test/parallel/test-process-load-env-file.js @@ -5,6 +5,7 @@ const fixtures = require('../../test/common/fixtures'); const assert = require('node:assert'); const { describe, it } = require('node:test'); const { join } = require('node:path'); +const { isMainThread } = require('worker_threads'); const basicValidEnvFilePath = fixtures.path('dotenv/basic-valid.env'); const validEnvFilePath = fixtures.path('dotenv/valid.env'); @@ -58,7 +59,7 @@ describe('process.loadEnvFile()', () => { const originalCwd = process.cwd(); try { - if (common.isMainThread) { + if (isMainThread) { process.chdir(join(originalCwd, 'lib')); } @@ -66,7 +67,7 @@ describe('process.loadEnvFile()', () => { process.loadEnvFile(); }, { code: 'ENOENT', syscall: 'open', path: '.env' }); } finally { - if (common.isMainThread) { + if (isMainThread) { process.chdir(originalCwd); } } diff --git a/test/parallel/test-process-setgroups.js b/test/parallel/test-process-setgroups.js index 9506f24a5f3447..49d147b6c2ddf5 100644 --- a/test/parallel/test-process-setgroups.js +++ b/test/parallel/test-process-setgroups.js @@ -1,14 +1,16 @@ 'use strict'; const common = require('../common'); const assert = require('assert'); +const { isMainThread } = require('worker_threads'); if (common.isWindows) { assert.strictEqual(process.setgroups, undefined); return; } -if (!common.isMainThread) +if (!isMainThread) { return; +} assert.throws( () => { diff --git a/test/parallel/test-process-uid-gid.js b/test/parallel/test-process-uid-gid.js index 54e87a6ff5c6e0..10eee45af1555b 100644 --- a/test/parallel/test-process-uid-gid.js +++ b/test/parallel/test-process-uid-gid.js @@ -23,6 +23,7 @@ const common = require('../common'); const assert = require('assert'); +const { isMainThread } = require('worker_threads'); if (common.isWindows) { // uid/gid functions are POSIX only. 
@@ -33,8 +34,9 @@ if (common.isWindows) { return; } -if (!common.isMainThread) +if (!isMainThread) { return; +} assert.throws(() => { process.setuid({}); diff --git a/test/parallel/test-process-umask-mask.js b/test/parallel/test-process-umask-mask.js index d599379761fd40..f0a67b8f14e895 100644 --- a/test/parallel/test-process-umask-mask.js +++ b/test/parallel/test-process-umask-mask.js @@ -5,8 +5,9 @@ const common = require('../common'); const assert = require('assert'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) common.skip('Setting process.umask is not supported in Workers'); let mask; diff --git a/test/parallel/test-process-umask.js b/test/parallel/test-process-umask.js index e90955f394df4e..594f75ebebed2b 100644 --- a/test/parallel/test-process-umask.js +++ b/test/parallel/test-process-umask.js @@ -22,8 +22,9 @@ 'use strict'; const common = require('../common'); const assert = require('assert'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) { +if (!isMainThread) { assert.strictEqual(typeof process.umask(), 'number'); assert.throws(() => { process.umask('0664'); diff --git a/test/parallel/test-readline-interface-no-trailing-newline.js b/test/parallel/test-readline-interface-no-trailing-newline.js index b3392db8619c95..398b85838c8b71 100644 --- a/test/parallel/test-readline-interface-no-trailing-newline.js +++ b/test/parallel/test-readline-interface-no-trailing-newline.js @@ -3,7 +3,9 @@ const common = require('../common'); const ArrayStream = require('../common/arraystream'); const assert = require('assert'); -common.skipIfDumbTerminal(); +if (process.env.TERM === 'dumb') { + common.skip('skipping - dumb terminal'); +} const readline = require('readline'); const rli = new readline.Interface({ diff --git a/test/parallel/test-readline-interface-recursive-writes.js b/test/parallel/test-readline-interface-recursive-writes.js index 3a0aee5be9d619..ea3df1968d08d8 100644 --- a/test/parallel/test-readline-interface-recursive-writes.js +++ b/test/parallel/test-readline-interface-recursive-writes.js @@ -3,7 +3,9 @@ const common = require('../common'); const ArrayStream = require('../common/arraystream'); const assert = require('assert'); -common.skipIfDumbTerminal(); +if (process.env.TERM === 'dumb') { + common.skip('skipping - dumb terminal'); +} const readline = require('readline'); const rli = new readline.Interface({ diff --git a/test/parallel/test-readline-interface.js b/test/parallel/test-readline-interface.js index a90e07d235030f..12ba0c709622e9 100644 --- a/test/parallel/test-readline-interface.js +++ b/test/parallel/test-readline-interface.js @@ -22,7 +22,10 @@ // Flags: --expose-internals 'use strict'; const common = require('../common'); -common.skipIfDumbTerminal(); + +if (process.env.TERM === 'dumb') { + common.skip('skipping - dumb terminal'); +} const assert = require('assert'); const readline = require('readline'); diff --git a/test/parallel/test-readline-position.js b/test/parallel/test-readline-position.js index 3603a42ecedc68..ac2fe43b37a097 100644 --- a/test/parallel/test-readline-position.js +++ b/test/parallel/test-readline-position.js @@ -7,7 +7,9 @@ const assert = require('assert'); const ctrlU = { ctrl: true, name: 'u' }; -common.skipIfDumbTerminal(); +if (process.env.TERM === 'dumb') { + common.skip('skipping - dumb terminal'); +} { const input = new PassThrough(); diff --git a/test/parallel/test-readline-promises-interface.js b/test/parallel/test-readline-promises-interface.js 
index 8e42d977301267..97424c1372629c 100644 --- a/test/parallel/test-readline-promises-interface.js +++ b/test/parallel/test-readline-promises-interface.js @@ -1,7 +1,10 @@ // Flags: --expose-internals 'use strict'; const common = require('../common'); -common.skipIfDumbTerminal(); + +if (process.env.TERM === 'dumb') { + common.skip('skipping - dumb terminal'); +} const assert = require('assert'); const readline = require('readline/promises'); diff --git a/test/parallel/test-readline-promises-tab-complete.js b/test/parallel/test-readline-promises-tab-complete.js index fd32900e71d096..d8b0ac30ee779d 100644 --- a/test/parallel/test-readline-promises-tab-complete.js +++ b/test/parallel/test-readline-promises-tab-complete.js @@ -8,7 +8,9 @@ const assert = require('assert'); const { EventEmitter } = require('events'); const { getStringWidth } = require('internal/util/inspect'); -common.skipIfDumbTerminal(); +if (process.env.TERM === 'dumb') { + common.skip('skipping - dumb terminal'); +} // This test verifies that the tab completion supports unicode and the writes // are limited to the minimum. diff --git a/test/parallel/test-readline-tab-complete.js b/test/parallel/test-readline-tab-complete.js index 64df237d56ad44..5b7b19102f412a 100644 --- a/test/parallel/test-readline-tab-complete.js +++ b/test/parallel/test-readline-tab-complete.js @@ -8,7 +8,9 @@ const assert = require('assert'); const EventEmitter = require('events').EventEmitter; const { getStringWidth } = require('internal/util/inspect'); -common.skipIfDumbTerminal(); +if (process.env.TERM === 'dumb') { + common.skip('skipping - dumb terminal'); +} // This test verifies that the tab completion supports unicode and the writes // are limited to the minimum. diff --git a/test/parallel/test-readline-undefined-columns.js b/test/parallel/test-readline-undefined-columns.js index 25bafe957fa40a..d7000a16dd88a7 100644 --- a/test/parallel/test-readline-undefined-columns.js +++ b/test/parallel/test-readline-undefined-columns.js @@ -5,7 +5,9 @@ const assert = require('assert'); const PassThrough = require('stream').PassThrough; const readline = require('readline'); -common.skipIfDumbTerminal(); +if (process.env.TERM === 'dumb') { + common.skip('skipping - dumb terminal'); +} // Checks that tab completion still works // when output column size is undefined diff --git a/test/parallel/test-readline.js b/test/parallel/test-readline.js index 77799fc14cf75f..0cf577942915a6 100644 --- a/test/parallel/test-readline.js +++ b/test/parallel/test-readline.js @@ -4,7 +4,9 @@ const { PassThrough } = require('stream'); const readline = require('readline'); const assert = require('assert'); -common.skipIfDumbTerminal(); +if (process.env.TERM === 'dumb') { + common.skip('skipping - dumb terminal'); +} { const input = new PassThrough(); diff --git a/test/parallel/test-repl-autocomplete.js b/test/parallel/test-repl-autocomplete.js index cb17523494b2ff..a68322c501e264 100644 --- a/test/parallel/test-repl-autocomplete.js +++ b/test/parallel/test-repl-autocomplete.js @@ -9,7 +9,9 @@ const assert = require('assert'); const fs = require('fs'); const { inspect } = require('util'); -common.skipIfDumbTerminal(); +if (process.env.TERM === 'dumb') { + common.skip('skipping - dumb terminal'); +} const tmpdir = require('../common/tmpdir'); tmpdir.refresh(); diff --git a/test/parallel/test-repl-editor.js b/test/parallel/test-repl-editor.js index e260f5e89174a8..fee647d0478e50 100644 --- a/test/parallel/test-repl-editor.js +++ b/test/parallel/test-repl-editor.js @@ -5,7 +5,9 @@ 
const assert = require('assert'); const repl = require('repl'); const ArrayStream = require('../common/arraystream'); -common.skipIfDumbTerminal(); +if (process.env.TERM === 'dumb') { + common.skip('skipping - dumb terminal'); +} // \u001b[nG - Moves the cursor to n st column // \u001b[0J - Clear screen diff --git a/test/parallel/test-repl-history-navigation.js b/test/parallel/test-repl-history-navigation.js index 4df120d7cb9eae..64317be960e8d1 100644 --- a/test/parallel/test-repl-history-navigation.js +++ b/test/parallel/test-repl-history-navigation.js @@ -9,7 +9,9 @@ const assert = require('assert'); const fs = require('fs'); const { inspect } = require('util'); -common.skipIfDumbTerminal(); +if (process.env.TERM === 'dumb') { + common.skip('skipping - dumb terminal'); +} const tmpdir = require('../common/tmpdir'); tmpdir.refresh(); diff --git a/test/parallel/test-repl-load-multiline-no-trailing-newline.js b/test/parallel/test-repl-load-multiline-no-trailing-newline.js index f57638d2521bbe..8fda91e35d1030 100644 --- a/test/parallel/test-repl-load-multiline-no-trailing-newline.js +++ b/test/parallel/test-repl-load-multiline-no-trailing-newline.js @@ -5,7 +5,9 @@ const fixtures = require('../common/fixtures'); const assert = require('assert'); const repl = require('repl'); -common.skipIfDumbTerminal(); +if (process.env.TERM === 'dumb') { + common.skip('skipping - dumb terminal'); +} const command = `.load ${fixtures.path('repl-load-multiline-no-trailing-newline.js')}`; const terminalCode = '\u001b[1G\u001b[0J \u001b[1G'; diff --git a/test/parallel/test-repl-load-multiline.js b/test/parallel/test-repl-load-multiline.js index 4fcf206bef1be1..920f4b1c25d144 100644 --- a/test/parallel/test-repl-load-multiline.js +++ b/test/parallel/test-repl-load-multiline.js @@ -5,7 +5,9 @@ const fixtures = require('../common/fixtures'); const assert = require('assert'); const repl = require('repl'); -common.skipIfDumbTerminal(); +if (process.env.TERM === 'dumb') { + common.skip('skipping - dumb terminal'); +} const command = `.load ${fixtures.path('repl-load-multiline.js')}`; const terminalCode = '\u001b[1G\u001b[0J \u001b[1G'; diff --git a/test/parallel/test-repl-mode.js b/test/parallel/test-repl-mode.js index aca8418904d082..f8a54d34089b00 100644 --- a/test/parallel/test-repl-mode.js +++ b/test/parallel/test-repl-mode.js @@ -4,7 +4,9 @@ const assert = require('assert'); const Stream = require('stream'); const repl = require('repl'); -common.skipIfDumbTerminal(); +if (process.env.TERM === 'dumb') { + common.skip('skipping - dumb terminal'); +} const tests = [ testSloppyMode, diff --git a/test/parallel/test-repl-permission-model.js b/test/parallel/test-repl-permission-model.js index 938f5121163a23..ab5c7bff06cde8 100644 --- a/test/parallel/test-repl-permission-model.js +++ b/test/parallel/test-repl-permission-model.js @@ -8,7 +8,9 @@ const REPL = require('internal/repl'); const assert = require('assert'); const { inspect } = require('util'); -common.skipIfDumbTerminal(); +if (process.env.TERM === 'dumb') { + common.skip('skipping - dumb terminal'); +} // Create an input stream specialized for testing an array of actions class ActionStream extends stream.Stream { diff --git a/test/parallel/test-repl-persistent-history.js b/test/parallel/test-repl-persistent-history.js index 99ba92eda4cf3d..f5e2d48139f449 100644 --- a/test/parallel/test-repl-persistent-history.js +++ b/test/parallel/test-repl-persistent-history.js @@ -11,7 +11,9 @@ const fs = require('fs'); const os = require('os'); const util = 
require('util'); -common.skipIfDumbTerminal(); +if (process.env.TERM === 'dumb') { + common.skip('skipping - dumb terminal'); +} const tmpdir = require('../common/tmpdir'); tmpdir.refresh(); diff --git a/test/parallel/test-repl-programmatic-history.js b/test/parallel/test-repl-programmatic-history.js index 1ae5123c6c8ea1..aae15eb752c862 100644 --- a/test/parallel/test-repl-programmatic-history.js +++ b/test/parallel/test-repl-programmatic-history.js @@ -9,7 +9,9 @@ const fs = require('fs'); const os = require('os'); const util = require('util'); -common.skipIfDumbTerminal(); +if (process.env.TERM === 'dumb') { + common.skip('skipping - dumb terminal'); +} const tmpdir = require('../common/tmpdir'); tmpdir.refresh(); diff --git a/test/parallel/test-repl-require-self-referential.js b/test/parallel/test-repl-require-self-referential.js index 7ced6dbf11721e..9a4fe000bbb7e3 100644 --- a/test/parallel/test-repl-require-self-referential.js +++ b/test/parallel/test-repl-require-self-referential.js @@ -4,9 +4,11 @@ const common = require('../common'); const fixtures = require('../common/fixtures'); const assert = require('assert'); const { spawn } = require('child_process'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) { common.skip('process.chdir is not available in Workers'); +} const selfRefModule = fixtures.path('self_ref_module'); const child = spawn(process.execPath, diff --git a/test/parallel/test-repl-require.js b/test/parallel/test-repl-require.js index fc431dea9f0f69..e740acef08b068 100644 --- a/test/parallel/test-repl-require.js +++ b/test/parallel/test-repl-require.js @@ -4,9 +4,11 @@ const common = require('../common'); const fixtures = require('../common/fixtures'); const assert = require('assert'); const net = require('net'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) { common.skip('process.chdir is not available in Workers'); +} process.chdir(fixtures.fixturesDir); const repl = require('repl'); diff --git a/test/parallel/test-repl-reverse-search.js b/test/parallel/test-repl-reverse-search.js index 93fb037c392c01..246488cbd0ef5f 100644 --- a/test/parallel/test-repl-reverse-search.js +++ b/test/parallel/test-repl-reverse-search.js @@ -9,7 +9,10 @@ const assert = require('assert'); const fs = require('fs'); const { inspect } = require('util'); -common.skipIfDumbTerminal(); +if (process.env.TERM === 'dumb') { + common.skip('skipping - dumb terminal'); +} + common.allowGlobals('aaaa'); const tmpdir = require('../common/tmpdir'); diff --git a/test/parallel/test-repl-sigint-nested-eval.js b/test/parallel/test-repl-sigint-nested-eval.js index 62eb46e0af6759..7955cf413f7c49 100644 --- a/test/parallel/test-repl-sigint-nested-eval.js +++ b/test/parallel/test-repl-sigint-nested-eval.js @@ -4,8 +4,12 @@ if (common.isWindows) { // No way to send CTRL_C_EVENT to processes from JS right now. common.skip('platform not supported'); } -if (!common.isMainThread) + +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { common.skip('No signal handling available in Workers'); +} const assert = require('assert'); const spawn = require('child_process').spawn; diff --git a/test/parallel/test-repl-sigint.js b/test/parallel/test-repl-sigint.js index 8ad0b2f5c2c853..f4087b11d488d6 100644 --- a/test/parallel/test-repl-sigint.js +++ b/test/parallel/test-repl-sigint.js @@ -4,8 +4,12 @@ if (common.isWindows) { // No way to send CTRL_C_EVENT to processes from JS right now. 
common.skip('platform not supported'); } -if (!common.isMainThread) + +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { common.skip('No signal handling available in Workers'); +} const assert = require('assert'); const spawn = require('child_process').spawn; diff --git a/test/parallel/test-repl-strict-mode-previews.js b/test/parallel/test-repl-strict-mode-previews.js index a05e11b39cf3ee..e7fc1ea5191ea3 100644 --- a/test/parallel/test-repl-strict-mode-previews.js +++ b/test/parallel/test-repl-strict-mode-previews.js @@ -5,7 +5,10 @@ const common = require('../common'); common.skipIfInspectorDisabled(); -common.skipIfDumbTerminal(); + +if (process.env.TERM === 'dumb') { + common.skip('skipping - dumb terminal'); +} if (process.argv[2] === 'child') { const stream = require('stream'); diff --git a/test/parallel/test-repl-tab-complete-import.js b/test/parallel/test-repl-tab-complete-import.js index fe9f7a3d11795b..f4ef408c89174c 100644 --- a/test/parallel/test-repl-tab-complete-import.js +++ b/test/parallel/test-repl-tab-complete-import.js @@ -7,8 +7,11 @@ const assert = require('assert'); const { builtinModules } = require('module'); const publicUnprefixedModules = builtinModules.filter((lib) => !lib.startsWith('_') && !lib.startsWith('node:')); -if (!common.isMainThread) +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { common.skip('process.chdir is not available in Workers'); +} // We have to change the directory to ../fixtures before requiring repl // in order to make the tests for completion of node_modules work properly diff --git a/test/parallel/test-repl-tab-complete.js b/test/parallel/test-repl-tab-complete.js index ff1e927078ddf5..c79162129bd69b 100644 --- a/test/parallel/test-repl-tab-complete.js +++ b/test/parallel/test-repl-tab-complete.js @@ -34,9 +34,11 @@ const { builtinModules } = require('module'); const publicModules = builtinModules.filter((lib) => !lib.startsWith('_')); const hasInspector = process.features.inspector; +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) { common.skip('process.chdir is not available in Workers'); +} // We have to change the directory to ../fixtures before requiring repl // in order to make the tests for completion of node_modules work properly diff --git a/test/parallel/test-require-symlink.js b/test/parallel/test-require-symlink.js index 0c4477023bc90b..9ca543e8d64ca4 100644 --- a/test/parallel/test-require-symlink.js +++ b/test/parallel/test-require-symlink.js @@ -2,10 +2,14 @@ 'use strict'; const common = require('../common'); -if (!common.canCreateSymLink()) +if (!common.canCreateSymLink()) { common.skip('insufficient privileges'); -if (!common.isMainThread) +} +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { common.skip('process.chdir is not available in Workers'); +} const assert = require('assert'); const { spawn } = require('child_process'); diff --git a/test/parallel/test-runner-module-mocking.js b/test/parallel/test-runner-module-mocking.js index cb40df98147302..8502d4aa99a9b6 100644 --- a/test/parallel/test-runner-module-mocking.js +++ b/test/parallel/test-runner-module-mocking.js @@ -1,8 +1,9 @@ // Flags: --experimental-test-module-mocks --experimental-require-module 'use strict'; const common = require('../common'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) { +if (!isMainThread) { common.skip('registering customization hooks in Workers does not work'); } diff 
--git a/test/parallel/test-set-process-debug-port.js b/test/parallel/test-set-process-debug-port.js index d00a1ddf68ebb6..7f0cbe068549d0 100644 --- a/test/parallel/test-set-process-debug-port.js +++ b/test/parallel/test-set-process-debug-port.js @@ -2,7 +2,11 @@ const common = require('../common'); common.skipIfInspectorDisabled(); -common.skipIfWorker(); +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} const assert = require('assert'); const kMinPort = 1024; diff --git a/test/parallel/test-setproctitle.js b/test/parallel/test-setproctitle.js index 7c4287829f7c0a..b08302e0a35ac0 100644 --- a/test/parallel/test-setproctitle.js +++ b/test/parallel/test-setproctitle.js @@ -1,15 +1,16 @@ 'use strict'; // Original test written by Jakub Lekstan const common = require('../common'); +const { isMainThread } = require('worker_threads'); // FIXME add sunos support -if (common.isSunOS) +if (common.isSunOS || common.isIBMi) { common.skip(`Unsupported platform [${process.platform}]`); -// FIXME add IBMi support -if (common.isIBMi) - common.skip('Unsupported platform IBMi'); -if (!common.isMainThread) +} + +if (!isMainThread) { common.skip('Setting the process title from Workers is not supported'); +} const assert = require('assert'); const { exec, execSync } = require('child_process'); @@ -25,8 +26,9 @@ process.title = title; assert.strictEqual(process.title, title); // Test setting the title but do not try to run `ps` on Windows. -if (common.isWindows) +if (common.isWindows) { common.skip('Windows does not have "ps" utility'); +} try { execSync('command -v ps'); diff --git a/test/parallel/test-shadow-realm-import-value-resolve.js b/test/parallel/test-shadow-realm-import-value-resolve.js index ee1c17d67c12f1..eeb00509d53a6c 100644 --- a/test/parallel/test-shadow-realm-import-value-resolve.js +++ b/test/parallel/test-shadow-realm-import-value-resolve.js @@ -3,8 +3,11 @@ const common = require('../common'); const assert = require('assert'); const path = require('path'); +const { isMainThread } = require('worker_threads'); -common.skipIfWorker('process.chdir is not supported in workers.'); +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} async function main() { const realm = new ShadowRealm(); diff --git a/test/parallel/test-signal-args.js b/test/parallel/test-signal-args.js index 7b72ed6dcb92d5..28a077ecc1c7d9 100644 --- a/test/parallel/test-signal-args.js +++ b/test/parallel/test-signal-args.js @@ -3,10 +3,15 @@ const common = require('../common'); const assert = require('assert'); -if (common.isWindows) +if (common.isWindows) { common.skip('Sending signals with process.kill is not supported on Windows'); -if (!common.isMainThread) +} + +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { common.skip('No signal handling available in Workers'); +} process.once('SIGINT', common.mustCall((signal) => { assert.strictEqual(signal, 'SIGINT'); diff --git a/test/parallel/test-signal-handler.js b/test/parallel/test-signal-handler.js index 05ec4e7f73faf5..b84d2063a288db 100644 --- a/test/parallel/test-signal-handler.js +++ b/test/parallel/test-signal-handler.js @@ -23,10 +23,15 @@ const common = require('../common'); -if (common.isWindows) +if (common.isWindows) { common.skip('SIGUSR1 and SIGHUP signals are not supported'); -if (!common.isMainThread) +} + +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { common.skip('Signal handling in Workers is 
not supported'); +} console.log(`process.pid: ${process.pid}`); diff --git a/test/parallel/test-stdio-pipe-access.js b/test/parallel/test-stdio-pipe-access.js index ac0e22c399a1b9..6bf6b107c60e92 100644 --- a/test/parallel/test-stdio-pipe-access.js +++ b/test/parallel/test-stdio-pipe-access.js @@ -1,7 +1,10 @@ 'use strict'; const common = require('../common'); -if (!common.isMainThread) +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { common.skip("Workers don't have process-like stdio"); +} // Test if Node handles accessing process.stdin if it is a redirected // pipe without deadlocking diff --git a/test/parallel/test-stdio-pipe-redirect.js b/test/parallel/test-stdio-pipe-redirect.js index 8b48133c8b0317..69367119ed3402 100644 --- a/test/parallel/test-stdio-pipe-redirect.js +++ b/test/parallel/test-stdio-pipe-redirect.js @@ -1,7 +1,10 @@ 'use strict'; const common = require('../common'); -if (!common.isMainThread) +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { common.skip("Workers don't have process-like stdio"); +} // Test if Node handles redirecting one child process stdout to another // process stdin without crashing. diff --git a/test/parallel/test-timers-immediate-unref-simple.js b/test/parallel/test-timers-immediate-unref-simple.js index 369894fcdebbae..fae8ad3eaea801 100644 --- a/test/parallel/test-timers-immediate-unref-simple.js +++ b/test/parallel/test-timers-immediate-unref-simple.js @@ -1,8 +1,9 @@ 'use strict'; const common = require('../common'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) { +if (!isMainThread) { // Note that test-timers-immediate-unref-nested-once works instead. common.skip('Worker bootstrapping works differently -> different timing'); } diff --git a/test/parallel/test-trace-events-api.js b/test/parallel/test-trace-events-api.js index 709f8de9097906..8792a40cf00c80 100644 --- a/test/parallel/test-trace-events-api.js +++ b/test/parallel/test-trace-events-api.js @@ -2,7 +2,12 @@ 'use strict'; const common = require('../common'); -common.skipIfWorker(); // https://github.com/nodejs/node/issues/22767 +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + // https://github.com/nodejs/node/issues/22767 + common.skip('This test only works on a main thread'); +} try { require('trace_events'); diff --git a/test/parallel/test-trace-events-dynamic-enable.js b/test/parallel/test-trace-events-dynamic-enable.js index 69251944031e1f..5b2ce313421568 100644 --- a/test/parallel/test-trace-events-dynamic-enable.js +++ b/test/parallel/test-trace-events-dynamic-enable.js @@ -4,7 +4,13 @@ const common = require('../common'); common.skipIfInspectorDisabled(); -common.skipIfWorker(); // https://github.com/nodejs/node/issues/22767 + +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + // https://github.com/nodejs/node/issues/22767 + common.skip('This test only works on a main thread'); +} const { internalBinding } = require('internal/test/binding'); diff --git a/test/parallel/test-warn-sigprof.js b/test/parallel/test-warn-sigprof.js index 36b0db78d82687..929deb69addb17 100644 --- a/test/parallel/test-warn-sigprof.js +++ b/test/parallel/test-warn-sigprof.js @@ -7,10 +7,15 @@ const common = require('../common'); common.skipIfInspectorDisabled(); -if (common.isWindows) +if (common.isWindows) { common.skip('test does not apply to Windows'); +} -common.skipIfWorker(); // Worker inspector 
never has a server running +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} common.expectWarning('Warning', 'process.on(SIGPROF) is reserved while debugging'); diff --git a/test/parallel/test-worker-name.js b/test/parallel/test-worker-name.js index 952fcee0e05429..30f3710a826caf 100644 --- a/test/parallel/test-worker-name.js +++ b/test/parallel/test-worker-name.js @@ -4,10 +4,17 @@ const common = require('../common'); const fixtures = require('../common/fixtures'); common.skipIfInspectorDisabled(); -common.skipIfWorker(); // This test requires both main and worker threads. + +const { + Worker, + isMainThread, +} = require('worker_threads'); + +if (!isMainThread) { + common.skip('This test only works on a main thread'); +} const assert = require('assert'); -const { Worker, isMainThread } = require('worker_threads'); if (isMainThread) { const name = 'Hello Thread'; diff --git a/test/report/test-report-signal.js b/test/report/test-report-signal.js index cb5efd9fc39fe2..03908ddcf24f16 100644 --- a/test/report/test-report-signal.js +++ b/test/report/test-report-signal.js @@ -3,11 +3,15 @@ // Test producing a report via signal. const common = require('../common'); -if (common.isWindows) +if (common.isWindows) { return common.skip('Unsupported on Windows.'); +} -if (!common.isMainThread) +const { isMainThread } = require('worker_threads'); + +if (!isMainThread) { common.skip('Signal reporting is only supported in the main thread'); +} const assert = require('assert'); const helper = require('../common/report'); diff --git a/test/sequential/test-fs-watch.js b/test/sequential/test-fs-watch.js index cb12acfc115a4b..8db27a79e33d0a 100644 --- a/test/sequential/test-fs-watch.js +++ b/test/sequential/test-fs-watch.js @@ -29,9 +29,11 @@ const fs = require('fs'); const path = require('path'); const tmpdir = require('../common/tmpdir'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) { common.skip('process.chdir is not available in Workers'); +} const expectFilePath = common.isWindows || common.isLinux || diff --git a/test/sequential/test-heapdump.js b/test/sequential/test-heapdump.js index 1388623e61f939..f9df88375ae596 100644 --- a/test/sequential/test-heapdump.js +++ b/test/sequential/test-heapdump.js @@ -1,9 +1,11 @@ 'use strict'; const common = require('../common'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) { common.skip('process.chdir is not available in Workers'); +} const { writeHeapSnapshot, getHeapSnapshot } = require('v8'); const assert = require('assert'); diff --git a/test/sequential/test-init.js b/test/sequential/test-init.js index 7195369e0e4f8e..dd5db5640d1f0c 100644 --- a/test/sequential/test-init.js +++ b/test/sequential/test-init.js @@ -24,9 +24,11 @@ const common = require('../common'); const assert = require('assert'); const child = require('child_process'); const fixtures = require('../common/fixtures'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) { common.skip('process.chdir is not available in Workers'); +} if (process.env.TEST_INIT) { return process.stdout.write('Loaded successfully!'); diff --git a/test/sequential/test-perf-hooks.js b/test/sequential/test-perf-hooks.js index 1e11f26571480d..847decdef18bfc 100644 --- a/test/sequential/test-perf-hooks.js +++ b/test/sequential/test-perf-hooks.js @@ -1,11 +1,12 @@ 'use strict'; -const 
common = require('../common'); +require('../common'); const { performance } = require('perf_hooks'); // Get the start time as soon as possible. const testStartTime = performance.now(); const assert = require('assert'); const { writeSync } = require('fs'); +const { isMainThread } = require('worker_threads'); // Use writeSync to stdout to avoid disturbing the loop. function log(str) { @@ -131,7 +132,7 @@ function checkValue(timing, name, min, max) { } let loopStart = initialTiming.loopStart; -if (common.isMainThread) { +if (isMainThread) { // In the main thread, the loop does not start until we start an operation // that requires it, e.g. setTimeout(). assert.strictEqual(initialTiming.loopStart, -1); From bf34a49206cb2ef768410fbb1f6216dce9824075 Mon Sep 17 00:00:00 2001 From: James M Snell Date: Wed, 22 Jan 2025 15:08:23 -0800 Subject: [PATCH 128/158] test: make common/index slightly less node.js specific * s/global/globalThis * clean up knownGlobals a bit, make it a Set instead of an array and condense a bit. PR-URL: https://github.com/nodejs/node/pull/56712 Reviewed-By: Yagiz Nizipli Reviewed-By: Matteo Collina --- test/common/index.js | 140 ++++++++++++++++++------------------------- 1 file changed, 57 insertions(+), 83 deletions(-) diff --git a/test/common/index.js b/test/common/index.js index 6086d584f0b595..3647f4554a4647 100644 --- a/test/common/index.js +++ b/test/common/index.js @@ -20,7 +20,7 @@ // USE OR OTHER DEALINGS IN THE SOFTWARE. 'use strict'; -const process = global.process; // Some tests tamper with the process global. +const process = globalThis.process; // Some tests tamper with the process globalThis. const assert = require('assert'); const { exec, execSync, spawn, spawnSync } = require('child_process'); @@ -266,7 +266,7 @@ function platformTimeout(ms) { return ms; } -let knownGlobals = [ +const knownGlobals = new Set([ AbortController, atob, btoa, @@ -278,88 +278,59 @@ let knownGlobals = [ setInterval, setTimeout, queueMicrotask, -]; - -if (global.gc) { - knownGlobals.push(global.gc); -} - -if (global.navigator) { - knownGlobals.push(global.navigator); -} - -if (global.Navigator) { - knownGlobals.push(global.Navigator); -} - -if (global.Performance) { - knownGlobals.push(global.Performance); -} -if (global.performance) { - knownGlobals.push(global.performance); -} -if (global.PerformanceMark) { - knownGlobals.push(global.PerformanceMark); -} -if (global.PerformanceMeasure) { - knownGlobals.push(global.PerformanceMeasure); -} - -// TODO(@ethan-arrowood): Similar to previous checks, this can be temporary -// until v16.x is EOL. Once all supported versions have structuredClone we -// can add this to the list above instead. 
-if (global.structuredClone) { - knownGlobals.push(global.structuredClone); -} - -if (global.EventSource) { - knownGlobals.push(EventSource); -} - -if (global.fetch) { - knownGlobals.push(fetch); -} -if (hasCrypto && global.crypto) { - knownGlobals.push(global.crypto); - knownGlobals.push(global.Crypto); - knownGlobals.push(global.CryptoKey); - knownGlobals.push(global.SubtleCrypto); -} -if (global.CustomEvent) { - knownGlobals.push(global.CustomEvent); -} -if (global.ReadableStream) { - knownGlobals.push( - global.ReadableStream, - global.ReadableStreamDefaultReader, - global.ReadableStreamBYOBReader, - global.ReadableStreamBYOBRequest, - global.ReadableByteStreamController, - global.ReadableStreamDefaultController, - global.TransformStream, - global.TransformStreamDefaultController, - global.WritableStream, - global.WritableStreamDefaultWriter, - global.WritableStreamDefaultController, - global.ByteLengthQueuingStrategy, - global.CountQueuingStrategy, - global.TextEncoderStream, - global.TextDecoderStream, - global.CompressionStream, - global.DecompressionStream, - ); -} + structuredClone, + fetch, +]); + +['gc', + // The following are assumed to be conditionally available in the + // global object currently. They can likely be added to the fixed + // set of known globals, however. + 'navigator', + 'Navigator', + 'performance', + 'Performance', + 'PerformanceMark', + 'PerformanceMeasure', + 'EventSource', + 'CustomEvent', + 'ReadableStream', + 'ReadableStreamDefaultReader', + 'ReadableStreamBYOBReader', + 'ReadableStreamBYOBRequest', + 'ReadableByteStreamController', + 'ReadableStreamDefaultController', + 'TransformStream', + 'TransformStreamDefaultController', + 'WritableStream', + 'WritableStreamDefaultWriter', + 'WritableStreamDefaultController', + 'ByteLengthQueuingStrategy', + 'CountQueuingStrategy', + 'TextEncoderStream', + 'TextDecoderStream', + 'CompressionStream', + 'DecompressionStream', + 'Storage', + 'localStorage', + 'sessionStorage', +].forEach((i) => { + if (globalThis[i] !== undefined) { + knownGlobals.add(globalThis[i]); + } +}); -if (global.Storage) { - knownGlobals.push( - global.localStorage, - global.sessionStorage, - global.Storage, - ); +if (hasCrypto) { + knownGlobals.add(globalThis.crypto); + knownGlobals.add(globalThis.Crypto); + knownGlobals.add(globalThis.CryptoKey); + knownGlobals.add(globalThis.SubtleCrypto); } function allowGlobals(...allowlist) { - knownGlobals = knownGlobals.concat(allowlist); + for (const val of allowlist) { + knownGlobals.add(val); + } } if (process.env.NODE_TEST_KNOWN_GLOBALS !== '0') { @@ -371,10 +342,13 @@ if (process.env.NODE_TEST_KNOWN_GLOBALS !== '0') { function leakedGlobals() { const leaked = []; - for (const val in global) { + for (const val in globalThis) { // globalThis.crypto is a getter that throws if Node.js was compiled - // without OpenSSL. - if (val !== 'crypto' && !knownGlobals.includes(global[val])) { + // without OpenSSL so we'll skip it if it is not available. 
+ if (val === 'crypto' && !hasCrypto) { + continue; + } + if (!knownGlobals.has(globalThis[val])) { leaked.push(val); } } From 917f98b29c6563a0e2defa1072e19afeae1e1ab7 Mon Sep 17 00:00:00 2001 From: James M Snell Date: Wed, 22 Jan 2025 15:30:30 -0800 Subject: [PATCH 129/158] test: replace more uses of `global` with `globalThis` PR-URL: https://github.com/nodejs/node/pull/56712 Reviewed-By: Yagiz Nizipli Reviewed-By: Matteo Collina --- test/parallel/test-aborted-util.js | 2 +- .../test-abortsignal-drop-settled-signals.mjs | 10 +-- test/parallel/test-assert-checktag.js | 8 +-- .../test-async-hooks-destroy-on-gc.js | 2 +- .../test-async-hooks-disable-gc-tracking.js | 2 +- ...test-async-hooks-prevent-double-destroy.js | 2 +- test/parallel/test-cli-eval.js | 2 +- test/parallel/test-common-gc.js | 4 +- .../parallel/test-console-assign-undefined.js | 16 ++--- test/parallel/test-console-instance.js | 4 +- test/parallel/test-console-self-assign.js | 2 +- test/parallel/test-crypto-dh-leak.js | 2 +- test/parallel/test-domain-crypto.js | 2 +- .../test-eventtarget-memoryleakwarning.js | 2 +- test/parallel/test-eventtarget.js | 2 +- .../test-finalization-registry-shutdown.js | 2 +- test/parallel/test-fs-filehandle.js | 2 +- .../test-fs-promises-file-handle-close.js | 2 +- test/parallel/test-fs-write-reuse-callback.js | 2 +- test/parallel/test-fs-write.js | 8 ++- .../test-gc-http-client-connaborted.js | 2 +- test/parallel/test-gc-net-timeout.js | 2 +- test/parallel/test-gc-tls-external-memory.js | 2 +- test/parallel/test-global-setters.js | 12 ++-- test/parallel/test-global.js | 14 ++-- ...-h2leak-destroy-session-on-socket-ended.js | 4 +- .../test-heapdump-async-hooks-init-promise.js | 2 +- .../test-http-agent-domain-reused-gc.js | 2 +- test/parallel/test-http-parser-bad-ref.js | 2 +- ...t-http-server-connections-checking-leak.js | 2 +- .../test-http-server-keepalive-req-gc.js | 4 +- test/parallel/test-http2-createwritereq.js | 2 +- ...-http2-session-gc-while-write-scheduled.js | 2 +- ...tp2-write-finishes-after-stream-destroy.js | 2 +- ...-https-server-connections-checking-leak.js | 2 +- .../test-inspector-scriptparsed-context.js | 4 +- ...r-vm-global-accessors-getter-sideeffect.js | 2 +- test/parallel/test-module-relative-lookup.js | 2 +- test/parallel/test-net-connect-memleak.js | 2 +- .../test-net-write-fully-async-buffer.js | 2 +- .../test-net-write-fully-async-hex-string.js | 2 +- test/parallel/test-performance-gc.js | 4 +- test/parallel/test-primitive-timer-leak.js | 2 +- test/parallel/test-repl-autolibs.js | 6 +- test/parallel/test-repl-underscore.js | 4 +- test/parallel/test-repl-use-global.js | 6 +- test/parallel/test-repl.js | 6 +- test/parallel/test-runner-mock-timers.js | 68 +++++++++---------- test/parallel/test-timers-api-refs.js | 12 ++-- .../parallel/test-timers-process-tampering.js | 4 +- test/parallel/test-tls-connect-memleak.js | 2 +- test/parallel/test-tls-securepair-leak.js | 2 +- ...test-tls-transport-destroy-after-own-gc.js | 6 +- test/parallel/test-trace-events-api.js | 2 +- test/parallel/test-util-format.js | 4 +- test/parallel/test-util-inspect.js | 6 +- test/parallel/test-vm-basic.js | 6 +- .../test-vm-create-and-run-in-context.js | 2 +- test/parallel/test-vm-cross-context.js | 2 +- test/parallel/test-vm-global-get-own.js | 26 +++---- test/parallel/test-vm-measure-memory-lazy.js | 10 +-- test/parallel/test-vm-module-basic.js | 12 ++-- .../test-vm-new-script-new-context.js | 34 +++++----- .../test-vm-new-script-this-context.js | 32 ++++----- 
test/parallel/test-vm-run-in-new-context.js | 34 +++++----- test/parallel/test-vm-static-this.js | 26 +++---- test/parallel/test-webstorage.js | 4 +- .../parallel/test-whatwg-url-custom-global.js | 4 +- test/parallel/test-worker-cli-options.js | 2 +- ...orker-message-channel-sharedarraybuffer.js | 2 +- .../parallel/test-worker-message-port-move.js | 2 +- ...est-worker-workerdata-sharedarraybuffer.js | 2 +- .../test-zlib-invalid-input-memory.js | 2 +- test/parallel/test-zlib-unused-weak.js | 4 +- 74 files changed, 246 insertions(+), 242 deletions(-) diff --git a/test/parallel/test-aborted-util.js b/test/parallel/test-aborted-util.js index 4bc45b9f5529bb..0566204ccdb074 100644 --- a/test/parallel/test-aborted-util.js +++ b/test/parallel/test-aborted-util.js @@ -32,7 +32,7 @@ test('Aborted with gc cleanup', async () => { const { promise, resolve } = Promise.withResolvers(); setImmediate(() => { - global.gc(); + globalThis.gc(); ac.abort(); strictEqual(ac.signal.aborted, true); strictEqual(getEventListeners(ac.signal, 'abort').length, 0); diff --git a/test/parallel/test-abortsignal-drop-settled-signals.mjs b/test/parallel/test-abortsignal-drop-settled-signals.mjs index 0abcaf81012716..b300b0e223fc93 100644 --- a/test/parallel/test-abortsignal-drop-settled-signals.mjs +++ b/test/parallel/test-abortsignal-drop-settled-signals.mjs @@ -16,7 +16,7 @@ function makeSubsequentCalls(limit, done, holdReferences = false) { // This setImmediate is necessary to ensure that in the last iteration the remaining signal is GCed (if not // retained) setImmediate(() => { - global.gc(); + globalThis.gc(); done(ac.signal, dependantSymbol); }); return; @@ -50,7 +50,7 @@ function runShortLivedSourceSignal(limit, done) { function run(iteration) { if (iteration > limit) { - global.gc(); + globalThis.gc(); done(signalRefs); return; } @@ -74,9 +74,9 @@ function runWithOrphanListeners(limit, done) { const ac = new AbortController(); if (iteration > limit) { setImmediate(() => { - global.gc(); + globalThis.gc(); setImmediate(() => { - global.gc(); + globalThis.gc(); done(composedSignalRefs); }); @@ -147,7 +147,7 @@ it('drops settled dependant signals when signal is composite', (t, done) => { t.assert.strictEqual(controllers[1].signal[kDependantSignals].size, 1); setImmediate(() => { - global.gc({ execution: 'async' }).then(async () => { + globalThis.gc({ execution: 'async' }).then(async () => { await gcUntil('all signals are GCed', () => { const totalDependantSignals = Math.max( controllers[0].signal[kDependantSignals].size, diff --git a/test/parallel/test-assert-checktag.js b/test/parallel/test-assert-checktag.js index 7587939436f40d..b86a1bde7f096d 100644 --- a/test/parallel/test-assert-checktag.js +++ b/test/parallel/test-assert-checktag.js @@ -49,13 +49,13 @@ test('', { skip: !hasCrypto }, () => { { // At the moment global has its own type tag const fakeGlobal = {}; - Object.setPrototypeOf(fakeGlobal, Object.getPrototypeOf(global)); - for (const prop of Object.keys(global)) { + Object.setPrototypeOf(fakeGlobal, Object.getPrototypeOf(globalThis)); + for (const prop of Object.keys(globalThis)) { fakeGlobal[prop] = global[prop]; } - assert.notDeepEqual(fakeGlobal, global); + assert.notDeepEqual(fakeGlobal, globalThis); // Message will be truncated anyway, don't validate - assert.throws(() => assert.deepStrictEqual(fakeGlobal, global), + assert.throws(() => assert.deepStrictEqual(fakeGlobal, globalThis), assert.AssertionError); } diff --git a/test/parallel/test-async-hooks-destroy-on-gc.js 
b/test/parallel/test-async-hooks-destroy-on-gc.js index fe6325e189734b..dd7eef8776cdf3 100644 --- a/test/parallel/test-async-hooks-destroy-on-gc.js +++ b/test/parallel/test-async-hooks-destroy-on-gc.js @@ -22,6 +22,6 @@ let asyncId = null; } setImmediate(() => { - global.gc(); + globalThis.gc(); setImmediate(() => assert.ok(destroyedIds.has(asyncId))); }); diff --git a/test/parallel/test-async-hooks-disable-gc-tracking.js b/test/parallel/test-async-hooks-disable-gc-tracking.js index 84c5043aad3335..87b096c258121c 100644 --- a/test/parallel/test-async-hooks-disable-gc-tracking.js +++ b/test/parallel/test-async-hooks-disable-gc-tracking.js @@ -14,7 +14,7 @@ const hook = async_hooks.createHook({ new async_hooks.AsyncResource('foobar', { requireManualDestroy: true }); setImmediate(() => { - global.gc(); + globalThis.gc(); setImmediate(() => { hook.disable(); }); diff --git a/test/parallel/test-async-hooks-prevent-double-destroy.js b/test/parallel/test-async-hooks-prevent-double-destroy.js index 689dc399f9d2f2..4aa55a5a6c87bf 100644 --- a/test/parallel/test-async-hooks-prevent-double-destroy.js +++ b/test/parallel/test-async-hooks-prevent-double-destroy.js @@ -17,7 +17,7 @@ const hook = async_hooks.createHook({ } setImmediate(() => { - global.gc(); + globalThis.gc(); setImmediate(() => { hook.disable(); }); diff --git a/test/parallel/test-cli-eval.js b/test/parallel/test-cli-eval.js index 24031581fd737e..9ec0fece409068 100644 --- a/test/parallel/test-cli-eval.js +++ b/test/parallel/test-cli-eval.js @@ -345,7 +345,7 @@ child.exec( // Regression test for https://github.com/nodejs/node/issues/45336 child.execFile(process.execPath, ['-p', - 'Object.defineProperty(global, "fs", { configurable: false });' + + 'Object.defineProperty(globalThis, "fs", { configurable: false });' + 'fs === require("node:fs")'], common.mustSucceed((stdout) => { assert.match(stdout, /^true/); diff --git a/test/parallel/test-common-gc.js b/test/parallel/test-common-gc.js index f7d73ccd0423e3..54abe3695cc3be 100644 --- a/test/parallel/test-common-gc.js +++ b/test/parallel/test-common-gc.js @@ -5,10 +5,10 @@ const { onGC } = require('../common/gc'); { onGC({}, { ongc: common.mustCall() }); - global.gc(); + globalThis.gc(); } { onGC(process, { ongc: common.mustNotCall() }); - global.gc(); + globalThis.gc(); } diff --git a/test/parallel/test-console-assign-undefined.js b/test/parallel/test-console-assign-undefined.js index 1021307b3c5f22..7f5b0e04727679 100644 --- a/test/parallel/test-console-assign-undefined.js +++ b/test/parallel/test-console-assign-undefined.js @@ -1,28 +1,28 @@ 'use strict'; -// Patch global.console before importing modules that may modify the console +// Patch globalThis.console before importing modules that may modify the console // object. -const tmp = global.console; -global.console = 42; +const tmp = globalThis.console; +globalThis.console = 42; require('../common'); const assert = require('assert'); // Originally the console had a getter. Test twice to verify it had no side // effect. 
-assert.strictEqual(global.console, 42); -assert.strictEqual(global.console, 42); +assert.strictEqual(globalThis.console, 42); +assert.strictEqual(globalThis.console, 42); assert.throws( () => console.log('foo'), { name: 'TypeError' } ); -global.console = 1; -assert.strictEqual(global.console, 1); +globalThis.console = 1; +assert.strictEqual(globalThis.console, 1); assert.strictEqual(console, 1); // Reset the console -global.console = tmp; +globalThis.console = tmp; console.log('foo'); diff --git a/test/parallel/test-console-instance.js b/test/parallel/test-console-instance.js index bf22314e22e031..0364a6213bc726 100644 --- a/test/parallel/test-console-instance.js +++ b/test/parallel/test-console-instance.js @@ -36,9 +36,9 @@ process.stdout.write = process.stderr.write = common.mustNotCall(); // Make sure that the "Console" function exists. assert.strictEqual(typeof Console, 'function'); -assert.strictEqual(requiredConsole, global.console); +assert.strictEqual(requiredConsole, globalThis.console); // Make sure the custom instanceof of Console works -assert.ok(global.console instanceof Console); +assert.ok(globalThis.console instanceof Console); assert.ok(!({} instanceof Console)); // Make sure that the Console constructor throws diff --git a/test/parallel/test-console-self-assign.js b/test/parallel/test-console-self-assign.js index 53c54ab9a327cf..46f9bc93d4f2bf 100644 --- a/test/parallel/test-console-self-assign.js +++ b/test/parallel/test-console-self-assign.js @@ -3,4 +3,4 @@ require('../common'); // Assigning to itself should not throw. -global.console = global.console; // eslint-disable-line no-self-assign +globalThis.console = globalThis.console; // eslint-disable-line no-self-assign diff --git a/test/parallel/test-crypto-dh-leak.js b/test/parallel/test-crypto-dh-leak.js index 3b5051feb43cd8..df1ba89737c619 100644 --- a/test/parallel/test-crypto-dh-leak.js +++ b/test/parallel/test-crypto-dh-leak.js @@ -22,7 +22,7 @@ const before = process.memoryUsage.rss(); dh.setPrivateKey(privateKey); } } -global.gc(); +globalThis.gc(); const after = process.memoryUsage.rss(); // RSS should stay the same, ceteris paribus, but allow for diff --git a/test/parallel/test-domain-crypto.js b/test/parallel/test-domain-crypto.js index e0a470bd9db515..47eb33f70aae45 100644 --- a/test/parallel/test-domain-crypto.js +++ b/test/parallel/test-domain-crypto.js @@ -31,7 +31,7 @@ const crypto = require('crypto'); // Pollution of global is intentional as part of test. 
common.allowGlobals(require('domain')); // See https://github.com/nodejs/node/commit/d1eff9ab -global.domain = require('domain'); +globalThis.domain = require('domain'); // Should not throw a 'TypeError: undefined is not a function' exception crypto.randomBytes(8); diff --git a/test/parallel/test-eventtarget-memoryleakwarning.js b/test/parallel/test-eventtarget-memoryleakwarning.js index 2c907165d865d9..2ec48720c8a8c5 100644 --- a/test/parallel/test-eventtarget-memoryleakwarning.js +++ b/test/parallel/test-eventtarget-memoryleakwarning.js @@ -103,7 +103,7 @@ common.expectWarning({ }); await setTimeout(0); - global.gc(); + globalThis.gc(); } })().then(common.mustCall(), common.mustNotCall()); } diff --git a/test/parallel/test-eventtarget.js b/test/parallel/test-eventtarget.js index cbe7eb3b0e8687..7153da172c1cf6 100644 --- a/test/parallel/test-eventtarget.js +++ b/test/parallel/test-eventtarget.js @@ -683,7 +683,7 @@ let asyncTest = Promise.resolve(); const et = new EventTarget(); et.addEventListener('foo', common.mustNotCall(), { [kWeakHandler]: {} }); setImmediate(() => { - global.gc(); + globalThis.gc(); et.dispatchEvent(new Event('foo')); }); } diff --git a/test/parallel/test-finalization-registry-shutdown.js b/test/parallel/test-finalization-registry-shutdown.js index f896aa2f285c75..e288d8fecca7e6 100644 --- a/test/parallel/test-finalization-registry-shutdown.js +++ b/test/parallel/test-finalization-registry-shutdown.js @@ -19,5 +19,5 @@ process.on('exit', () => { // This is the final chance to execute JavaScript. register(); // Queue a FinalizationRegistryCleanupTask by a testing gc request. - global.gc(); + globalThis.gc(); }); diff --git a/test/parallel/test-fs-filehandle.js b/test/parallel/test-fs-filehandle.js index 818a3824904431..bcb62da9e4c2cc 100644 --- a/test/parallel/test-fs-filehandle.js +++ b/test/parallel/test-fs-filehandle.js @@ -35,6 +35,6 @@ common.expectWarning({ 'DeprecationWarning': [[deprecationWarning, 'DEP0137']] }); -global.gc(); +globalThis.gc(); setTimeout(() => {}, 10); diff --git a/test/parallel/test-fs-promises-file-handle-close.js b/test/parallel/test-fs-promises-file-handle-close.js index d6417964746720..288bc31ea0ada5 100644 --- a/test/parallel/test-fs-promises-file-handle-close.js +++ b/test/parallel/test-fs-promises-file-handle-close.js @@ -32,7 +32,7 @@ doOpen().then(common.mustCall((fd) => { })).then(common.mustCall(() => { setImmediate(() => { // The FileHandle should be out-of-scope and no longer accessed now. - global.gc(); + globalThis.gc(); // Wait an extra event loop turn, as the warning is emitted from the // native layer in an unref()'ed setImmediate() callback. 
diff --git a/test/parallel/test-fs-write-reuse-callback.js b/test/parallel/test-fs-write-reuse-callback.js index 82c772ab340fed..c80902e54103fc 100644 --- a/test/parallel/test-fs-write-reuse-callback.js +++ b/test/parallel/test-fs-write-reuse-callback.js @@ -20,7 +20,7 @@ let done = 0; const ondone = common.mustSucceed(() => { if (++done < writes) { - if (done % 25 === 0) global.gc(); + if (done % 25 === 0) globalThis.gc(); setImmediate(write); } else { assert.strictEqual( diff --git a/test/parallel/test-fs-write.js b/test/parallel/test-fs-write.js index a4aeb4e16a748f..82f3425de2aa16 100644 --- a/test/parallel/test-fs-write.js +++ b/test/parallel/test-fs-write.js @@ -39,14 +39,18 @@ const { createExternalizableString, externalizeString, isOneByteString, -} = global; +} = globalThis; + +assert.notStrictEqual(createExternalizableString, undefined); +assert.notStrictEqual(externalizeString, undefined); +assert.notStrictEqual(isOneByteString, undefined); // Account for extra globals exposed by --expose_externalize_string. common.allowGlobals( createExternalizableString, externalizeString, isOneByteString, - global.x, + globalThis.x, ); { diff --git a/test/parallel/test-gc-http-client-connaborted.js b/test/parallel/test-gc-http-client-connaborted.js index 93ca8ee4de59f1..e52a555d788085 100644 --- a/test/parallel/test-gc-http-client-connaborted.js +++ b/test/parallel/test-gc-http-client-connaborted.js @@ -53,7 +53,7 @@ setImmediate(status); function status() { if (done > 0) { createClients = false; - global.gc(); + globalThis.gc(); console.log(`done/collected/total: ${done}/${countGC}/${count}`); if (countGC === count) { server.close(); diff --git a/test/parallel/test-gc-net-timeout.js b/test/parallel/test-gc-net-timeout.js index c4f74b34b79ec9..7a195c26bcdd6b 100644 --- a/test/parallel/test-gc-net-timeout.js +++ b/test/parallel/test-gc-net-timeout.js @@ -64,7 +64,7 @@ setImmediate(status); function status() { if (done > 0) { createClients = false; - global.gc(); + globalThis.gc(); console.log(`done/collected/total: ${done}/${countGC}/${count}`); if (countGC === count) { server.close(); diff --git a/test/parallel/test-gc-tls-external-memory.js b/test/parallel/test-gc-tls-external-memory.js index dcf38e11f6c6bf..480b1086b5395e 100644 --- a/test/parallel/test-gc-tls-external-memory.js +++ b/test/parallel/test-gc-tls-external-memory.js @@ -27,7 +27,7 @@ connect(); function connect() { if (runs % 64 === 0) - global.gc(); + globalThis.gc(); const externalMemoryUsage = process.memoryUsage().external; assert(externalMemoryUsage >= 0, `${externalMemoryUsage} < 0`); if (runs++ === 512) { diff --git a/test/parallel/test-global-setters.js b/test/parallel/test-global-setters.js index 7fd070ed8e1c4e..2da1097867261f 100644 --- a/test/parallel/test-global-setters.js +++ b/test/parallel/test-global-setters.js @@ -8,20 +8,20 @@ assert.strictEqual(process, _process); // eslint-disable-next-line no-global-assign process = 'asdf'; assert.strictEqual(process, 'asdf'); -assert.strictEqual(global.process, 'asdf'); -global.process = _process; +assert.strictEqual(globalThis.process, 'asdf'); +globalThis.process = _process; assert.strictEqual(process, _process); assert.strictEqual( - typeof Object.getOwnPropertyDescriptor(global, 'process').get, + typeof Object.getOwnPropertyDescriptor(globalThis, 'process').get, 'function'); assert.strictEqual(Buffer, _Buffer); // eslint-disable-next-line no-global-assign Buffer = 'asdf'; assert.strictEqual(Buffer, 'asdf'); -assert.strictEqual(global.Buffer, 'asdf'); -global.Buffer = 
_Buffer; +assert.strictEqual(globalThis.Buffer, 'asdf'); +globalThis.Buffer = _Buffer; assert.strictEqual(Buffer, _Buffer); assert.strictEqual( - typeof Object.getOwnPropertyDescriptor(global, 'Buffer').get, + typeof Object.getOwnPropertyDescriptor(globalThis, 'Buffer').get, 'function'); diff --git a/test/parallel/test-global.js b/test/parallel/test-global.js index 37f4db5252be5c..835bcc75a83e3b 100644 --- a/test/parallel/test-global.js +++ b/test/parallel/test-global.js @@ -60,9 +60,9 @@ for (const moduleName of builtinModules) { 'crypto', 'navigator', ]; - assert.deepStrictEqual(new Set(Object.keys(global)), new Set(expected)); + assert.deepStrictEqual(new Set(Object.keys(globalThis)), new Set(expected)); expected.forEach((value) => { - const desc = Object.getOwnPropertyDescriptor(global, value); + const desc = Object.getOwnPropertyDescriptor(globalThis, value); if (typeof desc.value === 'function') { assert.strictEqual(desc.value.name, value); } else if (typeof desc.get === 'function') { @@ -74,15 +74,15 @@ for (const moduleName of builtinModules) { common.allowGlobals('bar', 'foo'); baseFoo = 'foo'; // eslint-disable-line no-undef -global.baseBar = 'bar'; +globalThis.baseBar = 'bar'; -assert.strictEqual(global.baseFoo, 'foo', - `x -> global.x failed: global.baseFoo = ${global.baseFoo}`); +assert.strictEqual(globalThis.baseFoo, 'foo', + `x -> globalThis.x failed: globalThis.baseFoo = ${globalThis.baseFoo}`); assert.strictEqual(baseBar, // eslint-disable-line no-undef 'bar', // eslint-disable-next-line no-undef - `global.x -> x failed: baseBar = ${baseBar}`); + `globalThis.x -> x failed: baseBar = ${baseBar}`); const mod = require(fixtures.path('global', 'plain')); const fooBar = mod.fooBar; @@ -91,4 +91,4 @@ assert.strictEqual(fooBar.foo, 'foo'); assert.strictEqual(fooBar.bar, 'bar'); -assert.strictEqual(Object.prototype.toString.call(global), '[object global]'); +assert.strictEqual(Object.prototype.toString.call(globalThis), '[object global]'); diff --git a/test/parallel/test-h2leak-destroy-session-on-socket-ended.js b/test/parallel/test-h2leak-destroy-session-on-socket-ended.js index 3f0fe3e69d924d..af692b278f7d06 100644 --- a/test/parallel/test-h2leak-destroy-session-on-socket-ended.js +++ b/test/parallel/test-h2leak-destroy-session-on-socket-ended.js @@ -31,8 +31,8 @@ server.on('secureConnection', (s) => { firstServerStream = null; setImmediate(() => { - global.gc(); - global.gc(); + globalThis.gc(); + globalThis.gc(); server.close(); }); diff --git a/test/parallel/test-heapdump-async-hooks-init-promise.js b/test/parallel/test-heapdump-async-hooks-init-promise.js index c59cb89baa3d18..63b26843d1254e 100644 --- a/test/parallel/test-heapdump-async-hooks-init-promise.js +++ b/test/parallel/test-heapdump-async-hooks-init-promise.js @@ -43,4 +43,4 @@ async_hooks.createHook({ Promise.resolve().then(() => {}); -setImmediate(global.gc); +setImmediate(globalThis.gc); diff --git a/test/parallel/test-http-agent-domain-reused-gc.js b/test/parallel/test-http-agent-domain-reused-gc.js index 35146ee688eb9b..4f12c2ede839cd 100644 --- a/test/parallel/test-http-agent-domain-reused-gc.js +++ b/test/parallel/test-http-agent-domain-reused-gc.js @@ -26,7 +26,7 @@ async_hooks.createHook({ }, before(id) { if (id === reusedHandleId) { - global.gc(); + globalThis.gc(); checkBeforeCalled(); } } diff --git a/test/parallel/test-http-parser-bad-ref.js b/test/parallel/test-http-parser-bad-ref.js index 2c1bfe67485db7..e34054eca67063 100644 --- a/test/parallel/test-http-parser-bad-ref.js +++ 
b/test/parallel/test-http-parser-bad-ref.js @@ -18,7 +18,7 @@ let messagesComplete = 0; function flushPool() { Buffer.allocUnsafe(Buffer.poolSize - 1); - global.gc(); + globalThis.gc(); } function demoBug(part1, part2) { diff --git a/test/parallel/test-http-server-connections-checking-leak.js b/test/parallel/test-http-server-connections-checking-leak.js index 282c9a569fba7d..38dca83102cfea 100644 --- a/test/parallel/test-http-server-connections-checking-leak.js +++ b/test/parallel/test-http-server-connections-checking-leak.js @@ -20,5 +20,5 @@ for (let i = 0; i < max; i++) { } setImmediate(() => { - global.gc(); + globalThis.gc(); }); diff --git a/test/parallel/test-http-server-keepalive-req-gc.js b/test/parallel/test-http-server-keepalive-req-gc.js index 3bfb6c9600cc24..c827cd19ad7222 100644 --- a/test/parallel/test-http-server-keepalive-req-gc.js +++ b/test/parallel/test-http-server-keepalive-req-gc.js @@ -16,8 +16,8 @@ const server = createServer(common.mustCall((req, res) => { req.on('end', common.mustCall(() => { setImmediate(async () => { client.end(); - await global.gc({ type: 'major', execution: 'async' }); - await global.gc({ type: 'major', execution: 'async' }); + await globalThis.gc({ type: 'major', execution: 'async' }); + await globalThis.gc({ type: 'major', execution: 'async' }); }); })); res.end('hello world'); diff --git a/test/parallel/test-http2-createwritereq.js b/test/parallel/test-http2-createwritereq.js index 3015ad6c642801..6d2b07d5849ad0 100644 --- a/test/parallel/test-http2-createwritereq.js +++ b/test/parallel/test-http2-createwritereq.js @@ -69,7 +69,7 @@ server.listen(0, common.mustCall(function() { req.destroy = function(...args) { // Schedule a garbage collection event at the end of the current // MakeCallback() run. - process.nextTick(global.gc); + process.nextTick(globalThis.gc); return origDestroy.call(this, ...args); }; diff --git a/test/parallel/test-http2-session-gc-while-write-scheduled.js b/test/parallel/test-http2-session-gc-while-write-scheduled.js index 62379f7d7ed678..9693ded17c0a18 100644 --- a/test/parallel/test-http2-session-gc-while-write-scheduled.js +++ b/test/parallel/test-http2-session-gc-while-write-scheduled.js @@ -23,6 +23,6 @@ const tick = require('../common/tick'); // This schedules a write. client.settings(http2.getDefaultSettings()); client = null; - global.gc(); + globalThis.gc(); }); } diff --git a/test/parallel/test-http2-write-finishes-after-stream-destroy.js b/test/parallel/test-http2-write-finishes-after-stream-destroy.js index ed8833fdb926b1..bf9de8f9291917 100644 --- a/test/parallel/test-http2-write-finishes-after-stream-destroy.js +++ b/test/parallel/test-http2-write-finishes-after-stream-destroy.js @@ -9,7 +9,7 @@ const { duplexPair } = require('stream'); // Make sure the Http2Stream destructor works, since we don't clean the // stream up like we would otherwise do. 
-process.on('exit', global.gc); +process.on('exit', globalThis.gc); { const [ clientSide, serverSide ] = duplexPair(); diff --git a/test/parallel/test-https-server-connections-checking-leak.js b/test/parallel/test-https-server-connections-checking-leak.js index e920c8e403705f..f79149ef70a9ab 100644 --- a/test/parallel/test-https-server-connections-checking-leak.js +++ b/test/parallel/test-https-server-connections-checking-leak.js @@ -25,5 +25,5 @@ for (let i = 0; i < max; i++) { } setImmediate(() => { - global.gc(); + globalThis.gc(); }); diff --git a/test/parallel/test-inspector-scriptparsed-context.js b/test/parallel/test-inspector-scriptparsed-context.js index bd86ba53d4c986..31ae896c818b82 100644 --- a/test/parallel/test-inspector-scriptparsed-context.js +++ b/test/parallel/test-inspector-scriptparsed-context.js @@ -8,8 +8,8 @@ const script = ` 'use strict'; const assert = require('assert'); const vm = require('vm'); - global.outer = true; - global.inner = false; + globalThis.outer = true; + globalThis.inner = false; const context = vm.createContext({ outer: false, inner: true diff --git a/test/parallel/test-inspector-vm-global-accessors-getter-sideeffect.js b/test/parallel/test-inspector-vm-global-accessors-getter-sideeffect.js index 8b367e98c37f49..89414e50346871 100644 --- a/test/parallel/test-inspector-vm-global-accessors-getter-sideeffect.js +++ b/test/parallel/test-inspector-vm-global-accessors-getter-sideeffect.js @@ -14,7 +14,7 @@ session.connect(); const context = vm.createContext({ get a() { - global.foo = '1'; + globalThis.foo = '1'; return 100; } }); diff --git a/test/parallel/test-module-relative-lookup.js b/test/parallel/test-module-relative-lookup.js index 675c12c541fd4d..76af2b3b30c2e0 100644 --- a/test/parallel/test-module-relative-lookup.js +++ b/test/parallel/test-module-relative-lookup.js @@ -2,7 +2,7 @@ const common = require('../common'); const assert = require('assert'); -const _module = require('module'); // Avoid collision with global.module +const _module = require('module'); // Avoid collision with globalThis.module // Current directory gets highest priority for local modules function testFirstInPath(moduleName, isLocalModule) { diff --git a/test/parallel/test-net-connect-memleak.js b/test/parallel/test-net-connect-memleak.js index 84f643746838b6..079e45f7223a8b 100644 --- a/test/parallel/test-net-connect-memleak.js +++ b/test/parallel/test-net-connect-memleak.js @@ -49,7 +49,7 @@ const gcListener = { ongc() { collected = true; } }; } function done(sock) { - global.gc(); + globalThis.gc(); setImmediate(() => { assert.strictEqual(collected, true); sock.end(); diff --git a/test/parallel/test-net-write-fully-async-buffer.js b/test/parallel/test-net-write-fully-async-buffer.js index 0dddb51bd76ade..042dd79cb03127 100644 --- a/test/parallel/test-net-write-fully-async-buffer.js +++ b/test/parallel/test-net-write-fully-async-buffer.js @@ -23,7 +23,7 @@ const server = net.createServer(common.mustCall(function(conn) { } while (conn.write(Buffer.from(data))); - global.gc({ type: 'minor' }); + globalThis.gc({ type: 'minor' }); // The buffer allocated above should still be alive. 
} diff --git a/test/parallel/test-net-write-fully-async-hex-string.js b/test/parallel/test-net-write-fully-async-hex-string.js index 37b5cd75c1385c..b80b09f3244585 100644 --- a/test/parallel/test-net-write-fully-async-hex-string.js +++ b/test/parallel/test-net-write-fully-async-hex-string.js @@ -21,7 +21,7 @@ const server = net.createServer(common.mustCall(function(conn) { } while (conn.write(data, 'hex')); - global.gc({ type: 'minor' }); + globalThis.gc({ type: 'minor' }); // The buffer allocated inside the .write() call should still be alive. } diff --git a/test/parallel/test-performance-gc.js b/test/parallel/test-performance-gc.js index 9c4a3a850a22dd..9dddf7207f59d2 100644 --- a/test/parallel/test-performance-gc.js +++ b/test/parallel/test-performance-gc.js @@ -40,7 +40,7 @@ const kinds = [ obs.disconnect(); })); obs.observe({ entryTypes: ['gc'] }); - global.gc(); + globalThis.gc(); // Keep the event loop alive to witness the GC async callback happen. setImmediate(() => setImmediate(() => 0)); } @@ -51,6 +51,6 @@ const kinds = [ process.on('beforeExit', () => { assert(!didCall); didCall = true; - global.gc(); + globalThis.gc(); }); } diff --git a/test/parallel/test-primitive-timer-leak.js b/test/parallel/test-primitive-timer-leak.js index d590a0347b9cac..a0fe2765e1282d 100644 --- a/test/parallel/test-primitive-timer-leak.js +++ b/test/parallel/test-primitive-timer-leak.js @@ -5,7 +5,7 @@ const { onGC } = require('../common/gc'); // See https://github.com/nodejs/node/issues/53335 const poller = setInterval(() => { - global.gc(); + globalThis.gc(); }, 100); let count = 0; diff --git a/test/parallel/test-repl-autolibs.js b/test/parallel/test-repl-autolibs.js index 5cf3b1497221d0..a1eb476ef530b1 100644 --- a/test/parallel/test-repl-autolibs.js +++ b/test/parallel/test-repl-autolibs.js @@ -41,7 +41,7 @@ function test1() { assert.strictEqual(data, `${util.inspect(require('fs'), null, 2, false)}\n`); // Globally added lib matches required lib - assert.strictEqual(global.fs, require('fs')); + assert.strictEqual(globalThis.fs, require('fs')); test2(); } }; @@ -58,11 +58,11 @@ function test2() { // REPL response error message assert.strictEqual(data, '{}\n'); // Original value wasn't overwritten - assert.strictEqual(val, global.url); + assert.strictEqual(val, globalThis.url); } }; const val = {}; - global.url = val; + globalThis.url = val; common.allowGlobals(val); assert(!gotWrite); putIn.run(['url']); diff --git a/test/parallel/test-repl-underscore.js b/test/parallel/test-repl-underscore.js index 0f8103ce4573cd..8ce9de5563acfb 100644 --- a/test/parallel/test-repl-underscore.js +++ b/test/parallel/test-repl-underscore.js @@ -150,8 +150,8 @@ function testResetContextGlobal() { ]); // Delete globals leaked by REPL when `useGlobal` is `true` - delete global.module; - delete global.require; + delete globalThis.module; + delete globalThis.require; } function testError() { diff --git a/test/parallel/test-repl-use-global.js b/test/parallel/test-repl-use-global.js index 3457d0c5ba7210..06cda54f4d6fa2 100644 --- a/test/parallel/test-repl-use-global.js +++ b/test/parallel/test-repl-use-global.js @@ -20,10 +20,10 @@ const globalTest = (useGlobal, cb, output) => (err, repl) => { let str = ''; output.on('data', (data) => (str += data)); - global.lunch = 'tacos'; - repl.write('global.lunch;\n'); + globalThis.lunch = 'tacos'; + repl.write('globalThis.lunch;\n'); repl.close(); - delete global.lunch; + delete globalThis.lunch; cb(null, str.trim()); }; diff --git a/test/parallel/test-repl.js 
b/test/parallel/test-repl.js index d4ac8b6f087a3e..16cead7c7251dc 100644 --- a/test/parallel/test-repl.js +++ b/test/parallel/test-repl.js @@ -36,7 +36,7 @@ const prompt_tcp = 'node via TCP socket> '; const moduleFilename = fixtures.path('a'); // Function for REPL to run -global.invoke_me = function(arg) { +globalThis.invoke_me = function(arg) { return `invoked ${arg}`; }; @@ -939,8 +939,8 @@ alternatively use dynamic import: const { default: alias, namedExport } = await socket.end(); } - common.allowGlobals(global.invoke_me, global.message, global.a, global.blah, - global.I, global.f, global.path, global.x, global.name, global.foo); + common.allowGlobals(globalThis.invoke_me, globalThis.message, globalThis.a, globalThis.blah, + globalThis.I, globalThis.f, globalThis.path, globalThis.x, globalThis.name, globalThis.foo); })().then(common.mustCall()); function startTCPRepl() { diff --git a/test/parallel/test-runner-mock-timers.js b/test/parallel/test-runner-mock-timers.js index 87b8ba7e3784d2..da7458b4c46dd3 100644 --- a/test/parallel/test-runner-mock-timers.js +++ b/test/parallel/test-runner-mock-timers.js @@ -69,7 +69,7 @@ describe('Mock Timers Test Suite', () => { 'clearImmediate', ]; - const globalTimersDescriptors = timers.map((fn) => getDescriptor(global, fn)); + const globalTimersDescriptors = timers.map((fn) => getDescriptor(globalThis, fn)); const nodeTimersDescriptors = timers.map((fn) => getDescriptor(nodeTimers, fn)); const nodeTimersPromisesDescriptors = timers .filter((fn) => !fn.includes('clear')) @@ -116,7 +116,7 @@ describe('Mock Timers Test Suite', () => { it('should reset all timers when calling .reset function', (t) => { t.mock.timers.enable(); const fn = t.mock.fn(); - global.setTimeout(fn, 1000); + globalThis.setTimeout(fn, 1000); t.mock.timers.reset(); assert.deepStrictEqual(Date.now, globalThis.Date.now); assert.throws(() => { @@ -131,7 +131,7 @@ describe('Mock Timers Test Suite', () => { it('should reset all timers when calling Symbol.dispose', (t) => { t.mock.timers.enable(); const fn = t.mock.fn(); - global.setTimeout(fn, 1000); + globalThis.setTimeout(fn, 1000); // TODO(benjamingr) refactor to `using` t.mock.timers[Symbol.dispose](); assert.throws(() => { @@ -148,8 +148,8 @@ describe('Mock Timers Test Suite', () => { const order = []; const fn1 = t.mock.fn(() => order.push('f1')); const fn2 = t.mock.fn(() => order.push('f2')); - global.setTimeout(fn1, 1000); - global.setTimeout(fn2, 1000); + globalThis.setTimeout(fn1, 1000); + globalThis.setTimeout(fn2, 1000); t.mock.timers.tick(1000); assert.strictEqual(fn1.mock.callCount(), 1); assert.strictEqual(fn2.mock.callCount(), 1); @@ -170,11 +170,11 @@ describe('Mock Timers Test Suite', () => { const intervalFn = t.mock.fn(); t.mock.timers.enable(); - global.setTimeout(timeoutFn, 1111); - const id = global.setInterval(intervalFn, 9999); + globalThis.setTimeout(timeoutFn, 1111); + const id = globalThis.setInterval(intervalFn, 9999); t.mock.timers.runAll(); - global.clearInterval(id); + globalThis.clearInterval(id); assert.strictEqual(timeoutFn.mock.callCount(), 1); assert.strictEqual(intervalFn.mock.callCount(), 1); }); @@ -184,11 +184,11 @@ describe('Mock Timers Test Suite', () => { const intervalFn = t.mock.fn(); t.mock.timers.enable(); - global.setTimeout(timeoutFn, 1111); - const id = global.setInterval(intervalFn, 9999); + globalThis.setTimeout(timeoutFn, 1111); + const id = globalThis.setInterval(intervalFn, 9999); t.mock.timers.runAll(); - global.clearInterval(id); + globalThis.clearInterval(id); 
assert.strictEqual(timeoutFn.mock.callCount(), 1); assert.strictEqual(intervalFn.mock.callCount(), 1); assert.strictEqual(Date.now(), 9999); @@ -209,7 +209,7 @@ describe('Mock Timers Test Suite', () => { const fn = mock.fn(); - global.setTimeout(fn, 4000); + globalThis.setTimeout(fn, 4000); mock.timers.tick(4000); assert.strictEqual(fn.mock.callCount(), 1); @@ -220,7 +220,7 @@ describe('Mock Timers Test Suite', () => { t.mock.timers.enable({ apis: ['setTimeout'] }); const fn = t.mock.fn(); - global.setTimeout(fn, 2000); + globalThis.setTimeout(fn, 2000); t.mock.timers.tick(1000); assert.strictEqual(fn.mock.callCount(), 0); @@ -234,7 +234,7 @@ describe('Mock Timers Test Suite', () => { t.mock.timers.enable({ apis: ['setTimeout'] }); const fn = t.mock.fn(); const args = ['a', 'b', 'c']; - global.setTimeout(fn, 2000, ...args); + globalThis.setTimeout(fn, 2000, ...args); t.mock.timers.tick(1000); t.mock.timers.tick(500); @@ -248,7 +248,7 @@ describe('Mock Timers Test Suite', () => { const now = Date.now(); const timeout = 2; const expected = () => now - timeout; - global.setTimeout(common.mustCall(() => { + globalThis.setTimeout(common.mustCall(() => { assert.strictEqual(now - timeout, expected()); done(); }), timeout); @@ -257,7 +257,7 @@ describe('Mock Timers Test Suite', () => { it('should change timeout to 1ms when it is > TIMEOUT_MAX', (t) => { t.mock.timers.enable({ apis: ['setTimeout'] }); const fn = t.mock.fn(); - global.setTimeout(fn, TIMEOUT_MAX + 1); + globalThis.setTimeout(fn, TIMEOUT_MAX + 1); t.mock.timers.tick(1); assert.strictEqual(fn.mock.callCount(), 1); }); @@ -265,7 +265,7 @@ describe('Mock Timers Test Suite', () => { it('should change the delay to one if timeout < 0', (t) => { t.mock.timers.enable({ apis: ['setTimeout'] }); const fn = t.mock.fn(); - global.setTimeout(fn, -1); + globalThis.setTimeout(fn, -1); t.mock.timers.tick(1); assert.strictEqual(fn.mock.callCount(), 1); }); @@ -277,8 +277,8 @@ describe('Mock Timers Test Suite', () => { const fn = mock.fn(); - const id = global.setTimeout(fn, 4000); - global.clearTimeout(id); + const id = globalThis.setTimeout(fn, 4000); + globalThis.clearTimeout(id); t.mock.timers.tick(4000); assert.strictEqual(fn.mock.callCount(), 0); @@ -297,13 +297,13 @@ describe('Mock Timers Test Suite', () => { t.mock.timers.enable({ apis: ['setInterval'] }); const fn = t.mock.fn(); - const id = global.setInterval(fn, 200); + const id = globalThis.setInterval(fn, 200); t.mock.timers.tick(200); t.mock.timers.tick(200); t.mock.timers.tick(200); - global.clearInterval(id); + globalThis.clearInterval(id); assert.strictEqual(fn.mock.callCount(), 3); }); @@ -312,13 +312,13 @@ describe('Mock Timers Test Suite', () => { t.mock.timers.enable({ apis: ['setInterval'] }); const fn = t.mock.fn(); const args = ['a', 'b', 'c']; - const id = global.setInterval(fn, 200, ...args); + const id = globalThis.setInterval(fn, 200, ...args); t.mock.timers.tick(200); t.mock.timers.tick(200); t.mock.timers.tick(200); - global.clearInterval(id); + globalThis.clearInterval(id); assert.strictEqual(fn.mock.callCount(), 3); assert.deepStrictEqual(fn.mock.calls[0].arguments, args); @@ -332,8 +332,8 @@ describe('Mock Timers Test Suite', () => { t.mock.timers.enable({ apis: ['setInterval'] }); const fn = mock.fn(); - const id = global.setInterval(fn, 200); - global.clearInterval(id); + const id = globalThis.setInterval(fn, 200); + globalThis.clearInterval(id); t.mock.timers.tick(200); assert.strictEqual(fn.mock.callCount(), 0); @@ -352,7 +352,7 @@ describe('Mock Timers Test Suite', 
() => { const now = Date.now(); const timeout = 2; const expected = () => now - timeout; - global.setImmediate(common.mustCall(() => { + globalThis.setImmediate(common.mustCall(() => { assert.strictEqual(now - timeout, expected()); done(); })); @@ -362,7 +362,7 @@ describe('Mock Timers Test Suite', () => { t.mock.timers.enable({ apis: ['setImmediate'] }); const fn = t.mock.fn(); const args = ['a', 'b', 'c']; - global.setImmediate(fn, ...args); + globalThis.setImmediate(fn, ...args); t.mock.timers.tick(9999); assert.strictEqual(fn.mock.callCount(), 1); @@ -372,14 +372,14 @@ describe('Mock Timers Test Suite', () => { it('should not advance in time if clearImmediate was invoked', (t) => { t.mock.timers.enable({ apis: ['setImmediate'] }); - const id = global.setImmediate(common.mustNotCall()); - global.clearImmediate(id); + const id = globalThis.setImmediate(common.mustNotCall()); + globalThis.clearImmediate(id); t.mock.timers.tick(200); }); it('should advance in time and trigger timers when calling the .tick function', (t) => { t.mock.timers.enable({ apis: ['setImmediate'] }); - global.setImmediate(common.mustCall(1)); + globalThis.setImmediate(common.mustCall(1)); t.mock.timers.tick(0); }); @@ -389,8 +389,8 @@ describe('Mock Timers Test Suite', () => { const fn1 = t.mock.fn(common.mustCall(() => order.push('f1'), 1)); const fn2 = t.mock.fn(common.mustCall(() => order.push('f2'), 1)); - global.setImmediate(fn1); - global.setImmediate(fn2); + globalThis.setImmediate(fn1); + globalThis.setImmediate(fn2); t.mock.timers.tick(0); @@ -403,8 +403,8 @@ describe('Mock Timers Test Suite', () => { const fn1 = t.mock.fn(common.mustCall(() => order.push('f1'), 1)); const fn2 = t.mock.fn(common.mustCall(() => order.push('f2'), 1)); - global.setTimeout(fn2, 0); - global.setImmediate(fn1); + globalThis.setTimeout(fn2, 0); + globalThis.setImmediate(fn1); t.mock.timers.tick(100); diff --git a/test/parallel/test-timers-api-refs.js b/test/parallel/test-timers-api-refs.js index 3c55a05ac4c20a..a6a541963110bb 100644 --- a/test/parallel/test-timers-api-refs.js +++ b/test/parallel/test-timers-api-refs.js @@ -4,12 +4,12 @@ const timers = require('timers'); // Delete global APIs to make sure they're not relied on by the internal timers // code -delete global.setTimeout; -delete global.clearTimeout; -delete global.setInterval; -delete global.clearInterval; -delete global.setImmediate; -delete global.clearImmediate; +delete globalThis.setTimeout; +delete globalThis.clearTimeout; +delete globalThis.setInterval; +delete globalThis.clearInterval; +delete globalThis.setImmediate; +delete globalThis.clearImmediate; const timeoutCallback = () => { timers.clearTimeout(timeout); }; const timeout = timers.setTimeout(common.mustCall(timeoutCallback), 1); diff --git a/test/parallel/test-timers-process-tampering.js b/test/parallel/test-timers-process-tampering.js index 766cc9f3560c82..8632e7c96fa086 100644 --- a/test/parallel/test-timers-process-tampering.js +++ b/test/parallel/test-timers-process-tampering.js @@ -3,6 +3,6 @@ 'use strict'; const common = require('../common'); -global.process = {}; // Boom! -common.allowGlobals(global.process); +globalThis.process = {}; // Boom! 
+common.allowGlobals(globalThis.process); setImmediate(common.mustCall()); diff --git a/test/parallel/test-tls-connect-memleak.js b/test/parallel/test-tls-connect-memleak.js index 5bdcbe89f785f6..7b9cb71d8df0ba 100644 --- a/test/parallel/test-tls-connect-memleak.js +++ b/test/parallel/test-tls-connect-memleak.js @@ -57,7 +57,7 @@ const gcListener = { ongc() { collected = true; } }; } function done(sock) { - global.gc(); + globalThis.gc(); setImmediate(() => { assert.strictEqual(collected, true); sock.end(); diff --git a/test/parallel/test-tls-securepair-leak.js b/test/parallel/test-tls-securepair-leak.js index 98bdcde76ec034..e3d5c2cdf37b5d 100644 --- a/test/parallel/test-tls-securepair-leak.js +++ b/test/parallel/test-tls-securepair-leak.js @@ -17,7 +17,7 @@ const before = process.memoryUsage().external; createSecurePair(context, false, false, false, options).destroy(); } setImmediate(() => { - global.gc(); + globalThis.gc(); const after = process.memoryUsage().external; // It's not an exact science but a SecurePair grows .external by about 45 KiB. diff --git a/test/parallel/test-tls-transport-destroy-after-own-gc.js b/test/parallel/test-tls-transport-destroy-after-own-gc.js index 17c494ca0b79d1..bcac2c6ebde2b8 100644 --- a/test/parallel/test-tls-transport-destroy-after-own-gc.js +++ b/test/parallel/test-tls-transport-destroy-after-own-gc.js @@ -19,11 +19,11 @@ let clientTLSHandle = clientTLS._handle; // eslint-disable-line no-unused-vars setImmediate(() => { clientTLS = null; - global.gc(); + globalThis.gc(); clientTLSHandle = null; - global.gc(); + globalThis.gc(); setImmediate(() => { clientSide = null; - global.gc(); + globalThis.gc(); }); }); diff --git a/test/parallel/test-trace-events-api.js b/test/parallel/test-trace-events-api.js index 8792a40cf00c80..9bffb3b78c4ba3 100644 --- a/test/parallel/test-trace-events-api.js +++ b/test/parallel/test-trace-events-api.js @@ -109,7 +109,7 @@ if (isChild) { assert.strictEqual(getEnabledCategories(), 'abc'); tracing3 = undefined; } - global.gc(); + globalThis.gc(); assert.strictEqual(getEnabledCategories(), 'abc'); // Not able to disable the thing after this point, however. } diff --git a/test/parallel/test-util-format.js b/test/parallel/test-util-format.js index 8d2cab5a9c7a1c..6f222d0fea0fb8 100644 --- a/test/parallel/test-util-format.js +++ b/test/parallel/test-util-format.js @@ -197,9 +197,9 @@ assert.strictEqual(util.format('%s', -Infinity), '-Infinity'); util.format('%s', Object.setPrototypeOf(new Foo(), null)), '[Foo: null prototype] {}' ); - global.Foo = Foo; + globalThis.Foo = Foo; assert.strictEqual(util.format('%s', new Foo()), 'Bar'); - delete global.Foo; + delete globalThis.Foo; class Bar { abc = true; } assert.strictEqual(util.format('%s', new Bar()), 'Bar { abc: true }'); class Foobar extends Array { aaa = true; } diff --git a/test/parallel/test-util-inspect.js b/test/parallel/test-util-inspect.js index e84d80073bb3ad..48aaad977282fb 100644 --- a/test/parallel/test-util-inspect.js +++ b/test/parallel/test-util-inspect.js @@ -1265,9 +1265,9 @@ if (typeof Symbol !== 'undefined') { // a bonafide native Promise. { const oldPromise = Promise; - global.Promise = function() { this.bar = 42; }; + globalThis.Promise = function() { this.bar = 42; }; assert.strictEqual(util.inspect(new Promise()), '{ bar: 42 }'); - global.Promise = oldPromise; + globalThis.Promise = oldPromise; } // Test Map iterators. @@ -3181,7 +3181,7 @@ assert.strictEqual( } // Consistency check. 
- assert(fullObjectGraph(global).has(Function.prototype)); + assert(fullObjectGraph(globalThis).has(Function.prototype)); } { diff --git a/test/parallel/test-vm-basic.js b/test/parallel/test-vm-basic.js index 93c3fbaea631ab..5687987faeb0b9 100644 --- a/test/parallel/test-vm-basic.js +++ b/test/parallel/test-vm-basic.js @@ -59,9 +59,9 @@ const vm = require('vm'); const result = vm.runInThisContext( 'vmResult = "foo"; Object.prototype.toString.call(process);' ); - assert.strictEqual(global.vmResult, 'foo'); + assert.strictEqual(globalThis.vmResult, 'foo'); assert.strictEqual(result, '[object process]'); - delete global.vmResult; + delete globalThis.vmResult; } // vm.runInNewContext @@ -69,7 +69,7 @@ const vm = require('vm'); const result = vm.runInNewContext( 'vmResult = "foo"; typeof process;' ); - assert.strictEqual(global.vmResult, undefined); + assert.strictEqual(globalThis.vmResult, undefined); assert.strictEqual(result, 'undefined'); } diff --git a/test/parallel/test-vm-create-and-run-in-context.js b/test/parallel/test-vm-create-and-run-in-context.js index bd746cf2df7080..314ab9525743dc 100644 --- a/test/parallel/test-vm-create-and-run-in-context.js +++ b/test/parallel/test-vm-create-and-run-in-context.js @@ -45,6 +45,6 @@ assert.strictEqual(context.thing, 'lala'); // Run in contextified sandbox without referencing the context const sandbox = { x: 1 }; vm.createContext(sandbox); -global.gc(); +globalThis.gc(); vm.runInContext('x = 2', sandbox); // Should not crash. diff --git a/test/parallel/test-vm-cross-context.js b/test/parallel/test-vm-cross-context.js index b7cf1309d3689f..abdfde32a8d847 100644 --- a/test/parallel/test-vm-cross-context.js +++ b/test/parallel/test-vm-cross-context.js @@ -23,7 +23,7 @@ require('../common'); const vm = require('vm'); -const ctx = vm.createContext(global); +const ctx = vm.createContext(globalThis); // Should not throw. 
vm.runInContext('!function() { var x = console.log; }()', ctx); diff --git a/test/parallel/test-vm-global-get-own.js b/test/parallel/test-vm-global-get-own.js index 246fcbf866b8b6..de5e0a9619af65 100644 --- a/test/parallel/test-vm-global-get-own.js +++ b/test/parallel/test-vm-global-get-own.js @@ -9,7 +9,7 @@ const vm = require('vm'); // Related to: // - https://github.com/nodejs/node/issues/45983 -const global = vm.runInContext('this', vm.createContext()); +const contextGlobal = vm.runInContext('this', vm.createContext()); function runAssertions(data, property, viaDefine, value1, value2, value3) { // Define the property for the first time @@ -35,20 +35,20 @@ function runAssertionsOnSandbox(builder) { } // Assertions on: define property -runAssertions(global, 'toto', true, 1, 2, 3); -runAssertions(global, Symbol.for('toto'), true, 1, 2, 3); -runAssertions(global, 'tutu', true, fun1, fun2, fun3); -runAssertions(global, Symbol.for('tutu'), true, fun1, fun2, fun3); -runAssertions(global, 'tyty', true, fun1, 2, 3); -runAssertions(global, Symbol.for('tyty'), true, fun1, 2, 3); +runAssertions(contextGlobal, 'toto', true, 1, 2, 3); +runAssertions(contextGlobal, Symbol.for('toto'), true, 1, 2, 3); +runAssertions(contextGlobal, 'tutu', true, fun1, fun2, fun3); +runAssertions(contextGlobal, Symbol.for('tutu'), true, fun1, fun2, fun3); +runAssertions(contextGlobal, 'tyty', true, fun1, 2, 3); +runAssertions(contextGlobal, Symbol.for('tyty'), true, fun1, 2, 3); // Assertions on: direct assignment -runAssertions(global, 'titi', false, 1, 2, 3); -runAssertions(global, Symbol.for('titi'), false, 1, 2, 3); -runAssertions(global, 'tata', false, fun1, fun2, fun3); -runAssertions(global, Symbol.for('tata'), false, fun1, fun2, fun3); -runAssertions(global, 'tztz', false, fun1, 2, 3); -runAssertions(global, Symbol.for('tztz'), false, fun1, 2, 3); +runAssertions(contextGlobal, 'titi', false, 1, 2, 3); +runAssertions(contextGlobal, Symbol.for('titi'), false, 1, 2, 3); +runAssertions(contextGlobal, 'tata', false, fun1, fun2, fun3); +runAssertions(contextGlobal, Symbol.for('tata'), false, fun1, fun2, fun3); +runAssertions(contextGlobal, 'tztz', false, fun1, 2, 3); +runAssertions(contextGlobal, Symbol.for('tztz'), false, fun1, 2, 3); // Assertions on: define property from sandbox runAssertionsOnSandbox( diff --git a/test/parallel/test-vm-measure-memory-lazy.js b/test/parallel/test-vm-measure-memory-lazy.js index 513cfbc3672451..7f85f8d6ca9656 100644 --- a/test/parallel/test-vm-measure-memory-lazy.js +++ b/test/parallel/test-vm-measure-memory-lazy.js @@ -10,28 +10,28 @@ const vm = require('vm'); expectExperimentalWarning(); -// Test lazy memory measurement - we will need to global.gc() +// Test lazy memory measurement - we will need to globalThis.gc() // or otherwise these may not resolve. 
{ vm.measureMemory() .then(common.mustCall(assertSummaryShape)); - global.gc(); + globalThis.gc(); } { vm.measureMemory({}) .then(common.mustCall(assertSummaryShape)); - global.gc(); + globalThis.gc(); } { vm.measureMemory({ mode: 'summary' }) .then(common.mustCall(assertSummaryShape)); - global.gc(); + globalThis.gc(); } { vm.measureMemory({ mode: 'detailed' }) .then(common.mustCall(assertSummaryShape)); - global.gc(); + globalThis.gc(); } diff --git a/test/parallel/test-vm-module-basic.js b/test/parallel/test-vm-module-basic.js index cba1e037ac455a..53fed6536079a0 100644 --- a/test/parallel/test-vm-module-basic.js +++ b/test/parallel/test-vm-module-basic.js @@ -37,15 +37,15 @@ const util = require('util'); (async () => { const m = new SourceTextModule(` - global.vmResultFoo = "foo"; - global.vmResultTypeofProcess = Object.prototype.toString.call(process); + globalThis.vmResultFoo = "foo"; + globalThis.vmResultTypeofProcess = Object.prototype.toString.call(process); `); await m.link(common.mustNotCall()); await m.evaluate(); - assert.strictEqual(global.vmResultFoo, 'foo'); - assert.strictEqual(global.vmResultTypeofProcess, '[object process]'); - delete global.vmResultFoo; - delete global.vmResultTypeofProcess; + assert.strictEqual(globalThis.vmResultFoo, 'foo'); + assert.strictEqual(globalThis.vmResultTypeofProcess, '[object process]'); + delete globalThis.vmResultFoo; + delete globalThis.vmResultTypeofProcess; })().then(common.mustCall()); (async () => { diff --git a/test/parallel/test-vm-new-script-new-context.js b/test/parallel/test-vm-new-script-new-context.js index 482b4130d615d9..b4221d81d98dcb 100644 --- a/test/parallel/test-vm-new-script-new-context.js +++ b/test/parallel/test-vm-new-script-new-context.js @@ -49,43 +49,43 @@ const Script = require('vm').Script; } { - global.hello = 5; + globalThis.hello = 5; const script = new Script('hello = 2'); script.runInNewContext(); - assert.strictEqual(global.hello, 5); + assert.strictEqual(globalThis.hello, 5); // Cleanup - delete global.hello; + delete globalThis.hello; } { - global.code = 'foo = 1;' + + globalThis.code = 'foo = 1;' + 'bar = 2;' + 'if (baz !== 3) throw new Error(\'test fail\');'; - global.foo = 2; - global.obj = { foo: 0, baz: 3 }; - const script = new Script(global.code); + globalThis.foo = 2; + globalThis.obj = { foo: 0, baz: 3 }; + const script = new Script(globalThis.code); /* eslint-disable no-unused-vars */ - const baz = script.runInNewContext(global.obj); + const baz = script.runInNewContext(globalThis.obj); /* eslint-enable no-unused-vars */ - assert.strictEqual(global.obj.foo, 1); - assert.strictEqual(global.obj.bar, 2); - assert.strictEqual(global.foo, 2); + assert.strictEqual(globalThis.obj.foo, 1); + assert.strictEqual(globalThis.obj.bar, 2); + assert.strictEqual(globalThis.foo, 2); // cleanup - delete global.code; - delete global.foo; - delete global.obj; + delete globalThis.code; + delete globalThis.foo; + delete globalThis.obj; } { const script = new Script('f()'); - function changeFoo() { global.foo = 100; } + function changeFoo() { globalThis.foo = 100; } script.runInNewContext({ f: changeFoo }); - assert.strictEqual(global.foo, 100); + assert.strictEqual(globalThis.foo, 100); // cleanup - delete global.foo; + delete globalThis.foo; } { diff --git a/test/parallel/test-vm-new-script-this-context.js b/test/parallel/test-vm-new-script-this-context.js index 18f39f9086ae2a..30b220e3d4a2c2 100644 --- a/test/parallel/test-vm-new-script-this-context.js +++ b/test/parallel/test-vm-new-script-this-context.js @@ 
-35,34 +35,34 @@ assert.throws(() => { script.runInThisContext(script); }, /^Error: test$/); -global.hello = 5; +globalThis.hello = 5; script = new Script('hello = 2'); script.runInThisContext(script); -assert.strictEqual(global.hello, 2); +assert.strictEqual(globalThis.hello, 2); // Pass values -global.code = 'foo = 1;' + +globalThis.code = 'foo = 1;' + 'bar = 2;' + 'if (typeof baz !== "undefined") throw new Error("test fail");'; -global.foo = 2; -global.obj = { foo: 0, baz: 3 }; -script = new Script(global.code); +globalThis.foo = 2; +globalThis.obj = { foo: 0, baz: 3 }; +script = new Script(globalThis.code); script.runInThisContext(script); -assert.strictEqual(global.obj.foo, 0); -assert.strictEqual(global.bar, 2); -assert.strictEqual(global.foo, 1); +assert.strictEqual(globalThis.obj.foo, 0); +assert.strictEqual(globalThis.bar, 2); +assert.strictEqual(globalThis.foo, 1); // Call a function -global.f = function() { global.foo = 100; }; +globalThis.f = function() { globalThis.foo = 100; }; script = new Script('f()'); script.runInThisContext(script); -assert.strictEqual(global.foo, 100); +assert.strictEqual(globalThis.foo, 100); common.allowGlobals( - global.hello, - global.code, - global.foo, - global.obj, - global.f + globalThis.hello, + globalThis.code, + globalThis.foo, + globalThis.obj, + globalThis.f ); diff --git a/test/parallel/test-vm-run-in-new-context.js b/test/parallel/test-vm-run-in-new-context.js index 6e8c42812bbc88..c6f8fbf893ca9a 100644 --- a/test/parallel/test-vm-run-in-new-context.js +++ b/test/parallel/test-vm-run-in-new-context.js @@ -26,7 +26,7 @@ const common = require('../common'); const assert = require('assert'); const vm = require('vm'); -if (typeof global.gc !== 'function') +if (typeof globalThis.gc !== 'function') assert.fail('Run this test with --expose-gc'); // Run a string @@ -38,28 +38,28 @@ assert.throws(() => { vm.runInNewContext('throw new Error(\'test\');'); }, /^Error: test$/); -global.hello = 5; +globalThis.hello = 5; vm.runInNewContext('hello = 2'); -assert.strictEqual(global.hello, 5); +assert.strictEqual(globalThis.hello, 5); // Pass values in and out -global.code = 'foo = 1;' + +globalThis.code = 'foo = 1;' + 'bar = 2;' + 'if (baz !== 3) throw new Error(\'test fail\');'; -global.foo = 2; -global.obj = { foo: 0, baz: 3 }; +globalThis.foo = 2; +globalThis.obj = { foo: 0, baz: 3 }; /* eslint-disable no-unused-vars */ -const baz = vm.runInNewContext(global.code, global.obj); +const baz = vm.runInNewContext(globalThis.code, globalThis.obj); /* eslint-enable no-unused-vars */ -assert.strictEqual(global.obj.foo, 1); -assert.strictEqual(global.obj.bar, 2); -assert.strictEqual(global.foo, 2); +assert.strictEqual(globalThis.obj.foo, 1); +assert.strictEqual(globalThis.obj.bar, 2); +assert.strictEqual(globalThis.foo, 2); // Call a function by reference -function changeFoo() { global.foo = 100; } +function changeFoo() { globalThis.foo = 100; } vm.runInNewContext('f()', { f: changeFoo }); -assert.strictEqual(global.foo, 100); +assert.strictEqual(globalThis.foo, 100); // Modify an object by reference const f = { a: 1 }; @@ -68,7 +68,7 @@ assert.strictEqual(f.a, 2); // Use function in context without referencing context const fn = vm.runInNewContext('(function() { obj.p = {}; })', { obj: {} }); -global.gc(); +globalThis.gc(); fn(); // Should not crash @@ -93,8 +93,8 @@ for (const arg of [filename, { filename }]) { } common.allowGlobals( - global.hello, - global.code, - global.foo, - global.obj + globalThis.hello, + globalThis.code, + globalThis.foo, + 
globalThis.obj ); diff --git a/test/parallel/test-vm-static-this.js b/test/parallel/test-vm-static-this.js index e9382d6c3b4c1a..f47c0b5d0d056a 100644 --- a/test/parallel/test-vm-static-this.js +++ b/test/parallel/test-vm-static-this.js @@ -33,9 +33,9 @@ assert.throws(function() { vm.runInThisContext('throw new Error(\'test\');'); }, /test/); -global.hello = 5; +globalThis.hello = 5; vm.runInThisContext('hello = 2'); -assert.strictEqual(global.hello, 2); +assert.strictEqual(globalThis.hello, 2); // pass values @@ -43,23 +43,23 @@ const code = 'foo = 1;' + 'bar = 2;' + 'if (typeof baz !== \'undefined\')' + 'throw new Error(\'test fail\');'; -global.foo = 2; -global.obj = { foo: 0, baz: 3 }; +globalThis.foo = 2; +globalThis.obj = { foo: 0, baz: 3 }; /* eslint-disable no-unused-vars */ const baz = vm.runInThisContext(code); /* eslint-enable no-unused-vars */ -assert.strictEqual(global.obj.foo, 0); -assert.strictEqual(global.bar, 2); -assert.strictEqual(global.foo, 1); +assert.strictEqual(globalThis.obj.foo, 0); +assert.strictEqual(globalThis.bar, 2); +assert.strictEqual(globalThis.foo, 1); // call a function -global.f = function() { global.foo = 100; }; +globalThis.f = function() { globalThis.foo = 100; }; vm.runInThisContext('f()'); -assert.strictEqual(global.foo, 100); +assert.strictEqual(globalThis.foo, 100); common.allowGlobals( - global.hello, - global.foo, - global.obj, - global.f + globalThis.hello, + globalThis.foo, + globalThis.obj, + globalThis.f ); diff --git a/test/parallel/test-webstorage.js b/test/parallel/test-webstorage.js index 4da6b67bd2932b..7f9fe8dfa53391 100644 --- a/test/parallel/test-webstorage.js +++ b/test/parallel/test-webstorage.js @@ -69,7 +69,7 @@ test('sessionStorage is not persisted', async () => { test('localStorage throws without --localstorage-file ', async () => { const cp = await spawnPromisified(process.execPath, [ '--experimental-webstorage', - '-pe', 'localStorage === global.localStorage', + '-pe', 'localStorage === globalThis.localStorage', ]); assert.strictEqual(cp.code, 1); assert.strictEqual(cp.signal, null); @@ -81,7 +81,7 @@ test('localStorage is not persisted if it is unused', async () => { const cp = await spawnPromisified(process.execPath, [ '--experimental-webstorage', '--localstorage-file', nextLocalStorage(), - '-pe', 'localStorage === global.localStorage', + '-pe', 'localStorage === globalThis.localStorage', ]); assert.strictEqual(cp.code, 0); assert.match(cp.stdout, /true/); diff --git a/test/parallel/test-whatwg-url-custom-global.js b/test/parallel/test-whatwg-url-custom-global.js index b99dfd8f3e7d94..16efdfa8df1174 100644 --- a/test/parallel/test-whatwg-url-custom-global.js +++ b/test/parallel/test-whatwg-url-custom-global.js @@ -6,7 +6,7 @@ require('../common'); const assert = require('assert'); assert.deepStrictEqual( - Object.getOwnPropertyDescriptor(global, 'URL'), + Object.getOwnPropertyDescriptor(globalThis, 'URL'), { value: URL, writable: true, @@ -16,7 +16,7 @@ assert.deepStrictEqual( ); assert.deepStrictEqual( - Object.getOwnPropertyDescriptor(global, 'URLSearchParams'), + Object.getOwnPropertyDescriptor(globalThis, 'URLSearchParams'), { value: URLSearchParams, writable: true, diff --git a/test/parallel/test-worker-cli-options.js b/test/parallel/test-worker-cli-options.js index 0c243d251e97bc..3e6ab46db6ea74 100644 --- a/test/parallel/test-worker-cli-options.js +++ b/test/parallel/test-worker-cli-options.js @@ -8,7 +8,7 @@ const CODE = ` // If the --expose-internals flag does not pass to worker // require function will throw an 
error require('internal/options'); -global.gc(); +globalThis.gc(); `; // Test if the flags is passed to worker threads correctly diff --git a/test/parallel/test-worker-message-channel-sharedarraybuffer.js b/test/parallel/test-worker-message-channel-sharedarraybuffer.js index 220aa978b12051..6ee577d447ec97 100644 --- a/test/parallel/test-worker-message-channel-sharedarraybuffer.js +++ b/test/parallel/test-worker-message-channel-sharedarraybuffer.js @@ -19,7 +19,7 @@ const { Worker } = require('worker_threads'); `, { eval: true }); w.on('message', common.mustCall(() => { assert.strictEqual(local.toString(), 'Hello world!'); - global.gc(); + globalThis.gc(); w.terminate(); })); w.postMessage({ sharedArrayBuffer }); diff --git a/test/parallel/test-worker-message-port-move.js b/test/parallel/test-worker-message-port-move.js index 44efd2e6a6b94f..b8db31b88c7bc4 100644 --- a/test/parallel/test-worker-message-port-move.js +++ b/test/parallel/test-worker-message-port-move.js @@ -48,7 +48,7 @@ vm.runInContext('(' + function() { { let threw = false; try { - port.postMessage(global); + port.postMessage(globalThis); } catch (e) { assert.strictEqual(e.constructor.name, 'DOMException'); assert(e instanceof Object); diff --git a/test/parallel/test-worker-workerdata-sharedarraybuffer.js b/test/parallel/test-worker-workerdata-sharedarraybuffer.js index 4e3d508ac94941..4f1b332461280f 100644 --- a/test/parallel/test-worker-workerdata-sharedarraybuffer.js +++ b/test/parallel/test-worker-workerdata-sharedarraybuffer.js @@ -23,7 +23,7 @@ const { Worker } = require('worker_threads'); }); w.on('message', common.mustCall(() => { assert.strictEqual(local.toString(), 'Hello world!'); - global.gc(); + globalThis.gc(); w.terminate(); })); w.postMessage({}); diff --git a/test/parallel/test-zlib-invalid-input-memory.js b/test/parallel/test-zlib-invalid-input-memory.js index 9761e4bbf097d8..ac718395dae184 100644 --- a/test/parallel/test-zlib-invalid-input-memory.js +++ b/test/parallel/test-zlib-invalid-input-memory.js @@ -17,7 +17,7 @@ const ongc = common.mustCall(); strm.once('error', common.mustCall((err) => { assert(err); setImmediate(() => { - global.gc(); + globalThis.gc(); // Keep the event loop alive for seeing the async_hooks destroy hook // we use for GC tracking... // TODO(addaleax): This should maybe not be necessary? diff --git a/test/parallel/test-zlib-unused-weak.js b/test/parallel/test-zlib-unused-weak.js index 2c1e2d729030dd..cd1ab91ceb5c4b 100644 --- a/test/parallel/test-zlib-unused-weak.js +++ b/test/parallel/test-zlib-unused-weak.js @@ -6,12 +6,12 @@ const zlib = require('zlib'); // Tests that native zlib handles start out their life as weak handles. 
-global.gc(); +globalThis.gc(); const before = process.memoryUsage().external; for (let i = 0; i < 100; ++i) zlib.createGzip(); const afterCreation = process.memoryUsage().external; -global.gc(); +globalThis.gc(); const afterGC = process.memoryUsage().external; assert((afterGC - before) / (afterCreation - before) <= 0.05, From bcb35c3fb79141913619063c940f57dc231ba421 Mon Sep 17 00:00:00 2001 From: yamachu Date: Wed, 22 Jan 2025 20:21:46 +0900 Subject: [PATCH 130/158] test: add test that uses multibyte for path and resolves modules PR-URL: https://github.com/nodejs/node/pull/56696 Fixes: https://github.com/nodejs/node/issues/56650 Refs: https://github.com/nodejs/node/pull/56657 Reviewed-By: James M Snell Reviewed-By: Yagiz Nizipli Reviewed-By: Luigi Pinca --- .../experimental.json" | 3 +++ .../test-module-create-require-multibyte.js | 24 +++++++++++++++++++ 2 files changed, 27 insertions(+) create mode 100644 "test/fixtures/copy/utf/\346\226\260\345\273\272\346\226\207\344\273\266\345\244\271/experimental.json" create mode 100644 test/parallel/test-module-create-require-multibyte.js diff --git "a/test/fixtures/copy/utf/\346\226\260\345\273\272\346\226\207\344\273\266\345\244\271/experimental.json" "b/test/fixtures/copy/utf/\346\226\260\345\273\272\346\226\207\344\273\266\345\244\271/experimental.json" new file mode 100644 index 00000000000000..12611d2385a5a5 --- /dev/null +++ "b/test/fixtures/copy/utf/\346\226\260\345\273\272\346\226\207\344\273\266\345\244\271/experimental.json" @@ -0,0 +1,3 @@ +{ + "ofLife": 42 +} diff --git a/test/parallel/test-module-create-require-multibyte.js b/test/parallel/test-module-create-require-multibyte.js new file mode 100644 index 00000000000000..f9c4b6345dc59e --- /dev/null +++ b/test/parallel/test-module-create-require-multibyte.js @@ -0,0 +1,24 @@ +'use strict'; + +require('../common'); +const fixtures = require('../common/fixtures'); +const assert = require('assert'); + +// This test ensures that the module can be resolved +// even if the path given to createRequire contains multibyte characters. + +const { createRequire } = require('module'); + +{ + const u = fixtures.fileURL('あ.js'); + + const reqToo = createRequire(u); + assert.deepStrictEqual(reqToo('./experimental'), { ofLife: 42 }); +} + +{ + const u = fixtures.fileURL('copy/utf/新建文件夹/index.js'); + + const reqToo = createRequire(u); + assert.deepStrictEqual(reqToo('./experimental'), { ofLife: 42 }); +} From 872d68d87c4711a9806c7daca231de026dd99234 Mon Sep 17 00:00:00 2001 From: yamachu Date: Wed, 22 Jan 2025 20:24:38 +0900 Subject: [PATCH 131/158] src: fix to generate path from wchar_t via wstring Take a similar approach to node_file and allow the creation of paths code point must be specified to convert from wchar_t to utf8. 
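For illustration, a minimal, self-contained sketch of the Windows conversion pattern described above. It is not the actual Node.js helper; the function name, the example path, and the use of GetACP() here are assumptions made only for this sketch (the real patch uses GetACP() for module paths and CP_UTF8 in node_file.cc).

    // Sketch: convert a narrow path (in a caller-chosen code page) to a
    // std::filesystem::path via a wide string, as the patch does on Windows.
    #include <windows.h>

    #include <filesystem>
    #include <string>

    std::wstring ToWide(const std::string& str, UINT code_page) {
      if (str.empty()) return {};
      // First call computes the required length, second performs the conversion.
      int size = MultiByteToWideChar(
          code_page, 0, str.data(), static_cast<int>(str.size()), nullptr, 0);
      std::wstring out(size, L'\0');
      MultiByteToWideChar(code_page, 0, str.data(),
                          static_cast<int>(str.size()), &out[0], size);
      return out;
    }

    int main() {
      // A require()-style path that may contain multibyte characters in the
      // active code page; converting through a wide string avoids mangling it.
      std::string narrow = "C:\\tmp\\multibyte-dir\\index.js";
      std::filesystem::path p(ToWide(narrow, GetACP()));
      return p.has_filename() ? 0 : 1;
    }
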
PR-URL: https://github.com/nodejs/node/pull/56696 Fixes: https://github.com/nodejs/node/issues/56650 Refs: https://github.com/nodejs/node/pull/56657 Reviewed-By: James M Snell Reviewed-By: Yagiz Nizipli Reviewed-By: Luigi Pinca --- src/node_file.cc | 15 +-------------- src/node_modules.cc | 24 ++++++++++++++++++++---- src/util-inl.h | 16 ++++++++++++++++ 3 files changed, 37 insertions(+), 18 deletions(-) diff --git a/src/node_file.cc b/src/node_file.cc index 984bc55ee9b941..8e29bb39887625 100644 --- a/src/node_file.cc +++ b/src/node_file.cc @@ -3146,21 +3146,8 @@ static void GetFormatOfExtensionlessFile( } #ifdef _WIN32 -std::wstring ConvertToWideString(const std::string& str) { - int size_needed = MultiByteToWideChar( - CP_UTF8, 0, &str[0], static_cast(str.size()), nullptr, 0); - std::wstring wstrTo(size_needed, 0); - MultiByteToWideChar(CP_UTF8, - 0, - &str[0], - static_cast(str.size()), - &wstrTo[0], - size_needed); - return wstrTo; -} - #define BufferValueToPath(str) \ - std::filesystem::path(ConvertToWideString(str.ToString())) + std::filesystem::path(ConvertToWideString(str.ToString(), CP_UTF8)) std::string ConvertWideToUTF8(const std::wstring& wstr) { if (wstr.empty()) return std::string(); diff --git a/src/node_modules.cc b/src/node_modules.cc index 85c8e21cf026ff..38d2c65c7f3282 100644 --- a/src/node_modules.cc +++ b/src/node_modules.cc @@ -349,8 +349,16 @@ void BindingData::GetNearestParentPackageJSON( path_value_str.push_back(kPathSeparator); } - auto package_json = - TraverseParent(realm, std::filesystem::path(path_value_str)); + std::filesystem::path path; + +#ifdef _WIN32 + std::wstring wide_path = ConvertToWideString(path_value_str, GetACP()); + path = std::filesystem::path(wide_path); +#else + path = std::filesystem::path(path_value_str); +#endif + + auto package_json = TraverseParent(realm, path); if (package_json != nullptr) { args.GetReturnValue().Set(package_json->Serialize(realm)); @@ -375,8 +383,16 @@ void BindingData::GetNearestParentPackageJSONType( path_value_str.push_back(kPathSeparator); } - auto package_json = - TraverseParent(realm, std::filesystem::path(path_value_str)); + std::filesystem::path path; + +#ifdef _WIN32 + std::wstring wide_path = ConvertToWideString(path_value_str, GetACP()); + path = std::filesystem::path(wide_path); +#else + path = std::filesystem::path(path_value_str); +#endif + + auto package_json = TraverseParent(realm, path); if (package_json == nullptr) { return; diff --git a/src/util-inl.h b/src/util-inl.h index a35e15eeed6576..b5ae5950b62767 100644 --- a/src/util-inl.h +++ b/src/util-inl.h @@ -562,6 +562,22 @@ bool IsWindowsBatchFile(const char* filename) { #endif // _WIN32 } +#ifdef _WIN32 +inline std::wstring ConvertToWideString(const std::string& str, + UINT code_page) { + int size_needed = MultiByteToWideChar( + code_page, 0, &str[0], static_cast(str.size()), nullptr, 0); + std::wstring wstrTo(size_needed, 0); + MultiByteToWideChar(code_page, + 0, + &str[0], + static_cast(str.size()), + &wstrTo[0], + size_needed); + return wstrTo; +} +#endif // _WIN32 + } // namespace node #endif // defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS From 83c56da3286e74ed226ba7a1aaa9b75f436485af Mon Sep 17 00:00:00 2001 From: tjuhaszrh Date: Sat, 25 Jan 2025 10:34:54 +0100 Subject: [PATCH 132/158] src: fix build with GCC 15 Added cstdint to worker_inspector as on more recent version of gcc the build was failing due to changes to libstdc++ and the removal of transitive includes. 
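As background, a minimal illustration of the failure mode (the header and type names below are hypothetical, not the actual inspector sources): with GCC 15 and newer libstdc++, <cstdint> is no longer pulled in transitively by other standard headers, so any header that names the fixed-width integer types must include it directly.

    // worker_delegate_example.h (illustrative only)
    // Without the explicit <cstdint> include, newer toolchains can fail with
    // an error like "'uint64_t' does not name a type", even though the same
    // header built fine when another include happened to provide it.
    #include <cstdint>
    #include <memory>
    #include <string>

    struct WorkerDelegateExample {
      uint64_t thread_id = 0;   // needs <cstdint>
      std::string url;
      std::unique_ptr<WorkerDelegateExample> parent;
    };
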
PR-URL: https://github.com/nodejs/node/pull/56740 Fixes: https://github.com/nodejs/node/issues/56731 Reviewed-By: Antoine du Hamel Reviewed-By: Chengzhong Wu Reviewed-By: Richard Lau Reviewed-By: James M Snell --- src/inspector/worker_inspector.h | 1 + 1 file changed, 1 insertion(+) diff --git a/src/inspector/worker_inspector.h b/src/inspector/worker_inspector.h index d3254d5aa0ebe4..24403bb1704c40 100644 --- a/src/inspector/worker_inspector.h +++ b/src/inspector/worker_inspector.h @@ -5,6 +5,7 @@ #error("This header can only be used when inspector is enabled") #endif +#include #include #include #include From f4a9b134c0ab6b6c5ee5504c6792e523a370d013 Mon Sep 17 00:00:00 2001 From: Robin Mehner Date: Sat, 25 Jan 2025 12:02:31 +0100 Subject: [PATCH 133/158] doc: fix typo in example code for util.styleText MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Code shows how to style `errorMessage`, but then only logs out `successMessage` twice. This might trip people up when copying the code. PR-URL: https://github.com/nodejs/node/pull/56720 Reviewed-By: Ulises Gascón Reviewed-By: Richard Lau Reviewed-By: James M Snell Reviewed-By: Luigi Pinca Reviewed-By: Minwoo Jung --- doc/api/util.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/api/util.md b/doc/api/util.md index bbc4d86f0a2eac..1facf5636714f1 100644 --- a/doc/api/util.md +++ b/doc/api/util.md @@ -1966,7 +1966,7 @@ const errorMessage = styleText( // Validate if process.stderr has TTY { stream: stderr }, ); -console.error(successMessage); +console.error(errorMessage); ``` ```cjs From 55a0135261b874f7c385df852f97877396a5c1f6 Mon Sep 17 00:00:00 2001 From: Burkov Egor Date: Wed, 22 Jan 2025 16:27:26 +0300 Subject: [PATCH 134/158] src: add default value for RSACipherConfig mode field MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Using default init of enum is UB Refs: https://github.com/nodejs/node/issues/56693 PR-URL: https://github.com/nodejs/node/pull/56701 Reviewed-By: Juan José Arboleda Reviewed-By: Yagiz Nizipli Reviewed-By: James M Snell --- src/crypto/crypto_rsa.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crypto/crypto_rsa.h b/src/crypto/crypto_rsa.h index 29b259ae2f5284..6fc48da1aea2cd 100644 --- a/src/crypto/crypto_rsa.h +++ b/src/crypto/crypto_rsa.h @@ -77,7 +77,7 @@ struct RSAKeyExportTraits final { using RSAKeyExportJob = KeyExportJob; struct RSACipherConfig final : public MemoryRetainer { - CryptoJobMode mode; + CryptoJobMode mode = kCryptoJobAsync; ByteSource label; int padding = 0; const EVP_MD* digest = nullptr; From 36b02bf1b18e30ed7ae65f7e208f5543658012bc Mon Sep 17 00:00:00 2001 From: James M Snell Date: Wed, 22 Jan 2025 16:58:49 -0800 Subject: [PATCH 135/158] test: make some requires lazy in common/index PR-URL: https://github.com/nodejs/node/pull/56715 Reviewed-By: Yagiz Nizipli Reviewed-By: Richard Lau Reviewed-By: Matteo Collina --- test/common/index.js | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/test/common/index.js b/test/common/index.js index 3647f4554a4647..8113f604dfcdb6 100644 --- a/test/common/index.js +++ b/test/common/index.js @@ -23,7 +23,6 @@ const process = globalThis.process; // Some tests tamper with the process globalThis. 
const assert = require('assert'); -const { exec, execSync, spawn, spawnSync } = require('child_process'); const fs = require('fs'); const net = require('net'); // Do not require 'os' until needed so that test-os-checked-function can @@ -31,7 +30,6 @@ const net = require('net'); const path = require('path'); const { inspect, getCallSites } = require('util'); const { isMainThread } = require('worker_threads'); -const { isModuleNamespaceObject } = require('util/types'); const tmpdir = require('./tmpdir'); const bits = ['arm64', 'loong64', 'mips', 'mipsel', 'ppc64', 'riscv64', 's390x', 'x64'] @@ -104,6 +102,7 @@ if (process.argv.length === 2 && inspect(flags), 'Use NODE_SKIP_FLAG_CHECK to run the test with the original flags.', ); + const { spawnSync } = require('child_process'); const args = [...flags, ...process.execArgv, ...process.argv.slice(1)]; const options = { encoding: 'utf8', stdio: 'inherit' }; const result = spawnSync(process.execPath, args, options); @@ -232,6 +231,7 @@ function childShouldThrowAndAbort() { // continuous testing and developers' machines escapedArgs[0] = 'ulimit -c 0 && ' + escapedArgs[0]; } + const { exec } = require('child_process'); const child = exec(...escapedArgs); child.on('exit', function onExit(exitCode, signal) { const errMsg = 'Test should have aborted ' + @@ -474,6 +474,7 @@ function canCreateSymLink() { 'System32', 'whoami.exe'); try { + const { execSync } = require('child_process'); const output = execSync(`${whoamiPath} /priv`, { timeout: 1000 }); return output.includes('SeCreateSymbolicLinkPrivilege'); } catch { @@ -780,6 +781,7 @@ function requireNoPackageJSONAbove(dir = __dirname) { } function spawnPromisified(...args) { + const { spawn } = require('child_process'); let stderr = ''; let stdout = ''; @@ -843,6 +845,7 @@ function escapePOSIXShell(cmdParts, ...args) { * @param {object} expectation shape of expected namespace. 
*/ function expectRequiredModule(mod, expectation, checkESModule = true) { + const { isModuleNamespaceObject } = require('util/types'); const clone = { ...mod }; if (Object.hasOwn(mod, 'default') && checkESModule) { assert.strictEqual(mod.__esModule, true); @@ -920,6 +923,7 @@ const common = { }, get inFreeBSDJail() { + const { execSync } = require('child_process'); if (inFreeBSDJail !== null) return inFreeBSDJail; if (exports.isFreeBSD && From e1b0f44d192abe5cda9c48a09ef83f34b3ca610f Mon Sep 17 00:00:00 2001 From: Jonas Date: Sat, 25 Jan 2025 17:02:54 -0500 Subject: [PATCH 136/158] watch: reload env file for --env-file-if-exists PR-URL: https://github.com/nodejs/node/pull/56643 Reviewed-By: Yagiz Nizipli Reviewed-By: James M Snell --- lib/internal/main/watch_mode.js | 2 +- test/sequential/test-watch-mode.mjs | 26 ++++++++++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/lib/internal/main/watch_mode.js b/lib/internal/main/watch_mode.js index 6e2528e64737c7..60639efb45482d 100644 --- a/lib/internal/main/watch_mode.js +++ b/lib/internal/main/watch_mode.js @@ -33,7 +33,7 @@ markBootstrapComplete(); // TODO(MoLow): Make kill signal configurable const kKillSignal = 'SIGTERM'; const kShouldFilterModules = getOptionValue('--watch-path').length === 0; -const kEnvFile = getOptionValue('--env-file'); +const kEnvFile = getOptionValue('--env-file') || getOptionValue('--env-file-if-exists'); const kWatchedPaths = ArrayPrototypeMap(getOptionValue('--watch-path'), (path) => resolve(path)); const kPreserveOutput = getOptionValue('--watch-preserve-output'); const kCommand = ArrayPrototypeSlice(process.argv, 1); diff --git a/test/sequential/test-watch-mode.mjs b/test/sequential/test-watch-mode.mjs index 39bc7223dffdfc..324cdd10b3b4ef 100644 --- a/test/sequential/test-watch-mode.mjs +++ b/test/sequential/test-watch-mode.mjs @@ -242,6 +242,32 @@ describe('watch mode', { concurrency: !process.env.TEST_PARALLEL, timeout: 60_00 } }); + it('should load new env variables when --env-file-if-exists changes', async () => { + const envKey = `TEST_ENV_${Date.now()}`; + const envKey2 = `TEST_ENV_2_${Date.now()}`; + const jsFile = createTmpFile(`console.log('ENV: ' + process.env.${envKey} + '\\n' + 'ENV2: ' + process.env.${envKey2});`); + const envFile = createTmpFile(`${envKey}=value1`, '.env'); + const { done, restart } = runInBackground({ args: ['--watch', `--env-file-if-exists=${envFile}`, jsFile] }); + + try { + await restart(); + writeFileSync(envFile, `${envKey}=value1\n${envKey2}=newValue`); + + // Second restart, after env change + const { stderr, stdout } = await restart(); + + assert.strictEqual(stderr, ''); + assert.deepStrictEqual(stdout, [ + `Restarting ${inspect(jsFile)}`, + 'ENV: value1', + 'ENV2: newValue', + `Completed running ${inspect(jsFile)}`, + ]); + } finally { + await done(); + } + }); + it('should watch changes to a failing file', async () => { const file = createTmpFile('throw new Error("fails");'); const { stderr, stdout } = await runWriteSucceed({ From 59510ab8199381e2278bf14b9ac60e1877329ef6 Mon Sep 17 00:00:00 2001 From: Dario Piotrowicz Date: Sat, 25 Jan 2025 23:32:13 +0000 Subject: [PATCH 137/158] module: fix bad `require.resolve` with option paths for `.` and `..` this change fixes `require.resolve` used with the `paths` option not considering `.` and `..` as relative Fixes: https://github.com/nodejs/node/issues/47000 PR-URL: https://github.com/nodejs/node/pull/56735 Reviewed-By: Yagiz Nizipli Reviewed-By: James M 
Snell Reviewed-By: Jordan Harband Reviewed-By: Matteo Collina --- lib/internal/modules/cjs/loader.js | 34 +++++++-------- .../relative/subdir/relative-subdir.js | 1 + ...est-require-resolve-opts-paths-relative.js | 43 +++++++++++++++++++ 3 files changed, 61 insertions(+), 17 deletions(-) create mode 100644 test/fixtures/module-require/relative/subdir/relative-subdir.js create mode 100644 test/parallel/test-require-resolve-opts-paths-relative.js diff --git a/lib/internal/modules/cjs/loader.js b/lib/internal/modules/cjs/loader.js index a558185e08ddb1..6608be9d2db029 100644 --- a/lib/internal/modules/cjs/loader.js +++ b/lib/internal/modules/cjs/loader.js @@ -722,18 +722,8 @@ Module._findPath = function(request, paths, isMain, conditions = getCjsCondition ) )); - const isRelative = StringPrototypeCharCodeAt(request, 0) === CHAR_DOT && - ( - request.length === 1 || - StringPrototypeCharCodeAt(request, 1) === CHAR_FORWARD_SLASH || - (isWindows && StringPrototypeCharCodeAt(request, 1) === CHAR_BACKWARD_SLASH) || - (StringPrototypeCharCodeAt(request, 1) === CHAR_DOT && (( - request.length === 2 || - StringPrototypeCharCodeAt(request, 2) === CHAR_FORWARD_SLASH) || - (isWindows && StringPrototypeCharCodeAt(request, 2) === CHAR_BACKWARD_SLASH))) - ); let insidePath = true; - if (isRelative) { + if (isRelative(request)) { const normalizedRequest = path.normalize(request); if (StringPrototypeStartsWith(normalizedRequest, '..')) { insidePath = false; @@ -1328,12 +1318,7 @@ Module._resolveFilename = function(request, parent, isMain, options) { if (typeof options === 'object' && options !== null) { if (ArrayIsArray(options.paths)) { - const isRelative = StringPrototypeStartsWith(request, './') || - StringPrototypeStartsWith(request, '../') || - ((isWindows && StringPrototypeStartsWith(request, '.\\')) || - StringPrototypeStartsWith(request, '..\\')); - - if (isRelative) { + if (isRelative(request)) { paths = options.paths; } else { const fakeParent = new Module('', null); @@ -1978,6 +1963,21 @@ function createRequire(filename) { return createRequireFromPath(filepath); } +/** + * Checks if a path is relative + * @param {string} path the target path + * @returns {boolean} true if the path is relative, false otherwise + */ +function isRelative(path) { + if (StringPrototypeCharCodeAt(path, 0) !== CHAR_DOT) { return false; } + + return path.length === 1 || path === '..' 
|| + StringPrototypeStartsWith(path, './') || + StringPrototypeStartsWith(path, '../') || + ((isWindows && StringPrototypeStartsWith(path, '.\\')) || + StringPrototypeStartsWith(path, '..\\')); +} + Module.createRequire = createRequire; /** diff --git a/test/fixtures/module-require/relative/subdir/relative-subdir.js b/test/fixtures/module-require/relative/subdir/relative-subdir.js new file mode 100644 index 00000000000000..34eb71b3c6ca39 --- /dev/null +++ b/test/fixtures/module-require/relative/subdir/relative-subdir.js @@ -0,0 +1 @@ +exports.value = 'relative subdir'; diff --git a/test/parallel/test-require-resolve-opts-paths-relative.js b/test/parallel/test-require-resolve-opts-paths-relative.js new file mode 100644 index 00000000000000..522a1fdbce82a4 --- /dev/null +++ b/test/parallel/test-require-resolve-opts-paths-relative.js @@ -0,0 +1,43 @@ +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const fixtures = require('../common/fixtures'); + +if (!common.isMainThread) + common.skip('process.chdir is not available in Workers'); + +const subdir = fixtures.path('module-require', 'relative', 'subdir'); + +process.chdir(subdir); + +// Parent directory paths (`..`) work as intended +{ + assert(require.resolve('.', { paths: ['../'] }).endsWith('index.js')); + assert(require.resolve('./index.js', { paths: ['../'] }).endsWith('index.js')); + + // paths: [".."] should resolve like paths: ["../"] + assert(require.resolve('.', { paths: ['..'] }).endsWith('index.js')); + assert(require.resolve('./index.js', { paths: ['..'] }).endsWith('index.js')); +} + +process.chdir('..'); + +// Current directory paths (`.`) work as intended +{ + assert(require.resolve('.', { paths: ['.'] }).endsWith('index.js')); + assert(require.resolve('./index.js', { paths: ['./'] }).endsWith('index.js')); + + // paths: ["."] should resolve like paths: ["../"] + assert(require.resolve('.', { paths: ['.'] }).endsWith('index.js')); + assert(require.resolve('./index.js', { paths: ['.'] }).endsWith('index.js')); +} + +// Sub directory paths work as intended +{ + // assert.deepStrictEqual(fs.readdirSync('./subdir'), [5]); + assert(require.resolve('./relative-subdir.js', { paths: ['./subdir'] }).endsWith('relative-subdir.js')); + + // paths: ["subdir"] should resolve like paths: ["./subdir"] + assert(require.resolve('./relative-subdir.js', { paths: ['subdir'] }).endsWith('relative-subdir.js')); +} From 38acdb57ebbfe552e534fb3fe469cc8f890ac715 Mon Sep 17 00:00:00 2001 From: Dario Piotrowicz Date: Sun, 26 Jan 2025 01:30:11 +0000 Subject: [PATCH 138/158] doc: add note regarding commit message trailers Co-authored-by: Yagiz Nizipli Co-authored-by: Antoine du Hamel PR-URL: https://github.com/nodejs/node/pull/56736 Reviewed-By: James M Snell Reviewed-By: Antoine du Hamel Reviewed-By: Rafael Gonzaga Reviewed-By: Luigi Pinca --- doc/contributing/pull-requests.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/doc/contributing/pull-requests.md b/doc/contributing/pull-requests.md index 2ad538b3fd8e29..8914d60c95aa2f 100644 --- a/doc/contributing/pull-requests.md +++ b/doc/contributing/pull-requests.md @@ -184,6 +184,11 @@ A good commit message should describe what changed and why. of the log. Use the `Fixes:` prefix and the full issue URL. For other references use `Refs:`. + `Fixes:` and `Refs:` trailers get automatically added to your commit message + when the Pull Request lands as long as they are included in the + Pull Request's description. 
If the Pull Request lands in several commits, + by default the trailers found in the description are added to each commits. + Examples: * `Fixes: https://github.com/nodejs/node/issues/1337` From 8fb03d8f433e6d404f39f9d5411bbdfa37836171 Mon Sep 17 00:00:00 2001 From: James M Snell Date: Wed, 15 Jan 2025 13:57:47 -0800 Subject: [PATCH 139/158] src: move more crypto to ncrypto PR-URL: https://github.com/nodejs/node/pull/56653 Reviewed-By: Yagiz Nizipli --- deps/ncrypto/ncrypto.cc | 942 ++++++++++++++++++++++++++++++++++- deps/ncrypto/ncrypto.h | 289 ++++++++++- src/crypto/crypto_cipher.cc | 126 ++--- src/crypto/crypto_cipher.h | 16 +- src/crypto/crypto_common.cc | 9 +- src/crypto/crypto_context.cc | 2 +- src/crypto/crypto_dh.cc | 24 +- src/crypto/crypto_dsa.cc | 47 +- src/crypto/crypto_ec.cc | 71 +-- src/crypto/crypto_hash.cc | 140 +++--- src/crypto/crypto_hkdf.cc | 4 +- src/crypto/crypto_hmac.cc | 75 +-- src/crypto/crypto_keygen.cc | 6 +- src/crypto/crypto_keys.cc | 15 +- src/crypto/crypto_pbkdf2.cc | 6 +- src/crypto/crypto_random.cc | 22 +- src/crypto/crypto_rsa.cc | 291 ++++------- src/crypto/crypto_sig.cc | 596 ++++++++++------------ src/crypto/crypto_sig.h | 49 +- src/crypto/crypto_tls.cc | 76 ++- src/crypto/crypto_tls.h | 22 +- src/crypto/crypto_util.cc | 13 +- src/crypto/crypto_util.h | 45 +- src/crypto/crypto_x509.cc | 227 ++++----- src/crypto/crypto_x509.h | 10 +- 25 files changed, 1989 insertions(+), 1134 deletions(-) diff --git a/deps/ncrypto/ncrypto.cc b/deps/ncrypto/ncrypto.cc index ce2e7b384eb198..be3ef98d763366 100644 --- a/deps/ncrypto/ncrypto.cc +++ b/deps/ncrypto/ncrypto.cc @@ -15,8 +15,23 @@ #include "dh-primes.h" #endif // OPENSSL_IS_BORINGSSL +// EVP_PKEY_CTX_set_dsa_paramgen_q_bits was added in OpenSSL 1.1.1e. 
+#if OPENSSL_VERSION_NUMBER < 0x1010105fL +#define EVP_PKEY_CTX_set_dsa_paramgen_q_bits(ctx, qbits) \ + EVP_PKEY_CTX_ctrl((ctx), \ + EVP_PKEY_DSA, \ + EVP_PKEY_OP_PARAMGEN, \ + EVP_PKEY_CTRL_DSA_PARAMGEN_Q_BITS, \ + (qbits), \ + nullptr) +#endif + namespace ncrypto { namespace { +using BignumCtxPointer = DeleteFnPtr; +using BignumGenCallbackPointer = DeleteFnPtr; +using NetscapeSPKIPointer = DeleteFnPtr; + static constexpr int kX509NameFlagsRFC2253WithinUtf8JSON = XN_FLAG_RFC2253 & ~ASN1_STRFLGS_ESC_MSB & ~ASN1_STRFLGS_ESC_CTRL; } // namespace @@ -87,6 +102,10 @@ DataPointer DataPointer::Alloc(size_t len) { return DataPointer(OPENSSL_zalloc(len), len); } +DataPointer DataPointer::Copy(const Buffer& buffer) { + return DataPointer(OPENSSL_memdup(buffer.data, buffer.len), buffer.len); +} + DataPointer::DataPointer(void* data, size_t length) : data_(data), len_(length) {} @@ -109,6 +128,11 @@ DataPointer::~DataPointer() { reset(); } +void DataPointer::zero() { + if (!data_) return; + OPENSSL_cleanse(data_, len_); +} + void DataPointer::reset(void* data, size_t length) { if (data_ != nullptr) { OPENSSL_clear_free(data_, len_); @@ -131,6 +155,15 @@ Buffer DataPointer::release() { return buf; } +DataPointer DataPointer::resize(size_t len) { + size_t actual_len = std::min(len_, len); + auto buf = release(); + if (actual_len == len_) return DataPointer(buf); + buf.data = OPENSSL_realloc(buf.data, actual_len); + buf.len = actual_len; + return DataPointer(buf); +} + // ============================================================================ bool isFipsEnabled() { #if OPENSSL_VERSION_MAJOR >= 3 @@ -782,7 +815,7 @@ bool PrintGeneralName(const BIOPointer& out, const GENERAL_NAME* gen) { bool SafeX509SubjectAltNamePrint(const BIOPointer& out, X509_EXTENSION* ext) { auto ret = OBJ_obj2nid(X509_EXTENSION_get_object(ext)); - NCRYPTO_ASSERT_EQUAL(ret, NID_subject_alt_name, "unexpected extension type"); + if (ret != NID_subject_alt_name) return false; GENERAL_NAMES* names = static_cast(X509V3_EXT_d2i(ext)); if (names == nullptr) return false; @@ -805,7 +838,7 @@ bool SafeX509SubjectAltNamePrint(const BIOPointer& out, X509_EXTENSION* ext) { bool SafeX509InfoAccessPrint(const BIOPointer& out, X509_EXTENSION* ext) { auto ret = OBJ_obj2nid(X509_EXTENSION_get_object(ext)); - NCRYPTO_ASSERT_EQUAL(ret, NID_info_access, "unexpected extension type"); + if (ret != NID_info_access) return false; AUTHORITY_INFO_ACCESS* descs = static_cast(X509V3_EXT_d2i(ext)); @@ -1132,6 +1165,49 @@ Result X509Pointer::Parse( return Result(ERR_get_error()); } +bool X509View::enumUsages(UsageCallback callback) const { + if (cert_ == nullptr) return false; + StackOfASN1 eku(static_cast( + X509_get_ext_d2i(cert_, NID_ext_key_usage, nullptr, nullptr))); + if (!eku) return false; + const int count = sk_ASN1_OBJECT_num(eku.get()); + char buf[256]{}; + + for (int i = 0; i < count; i++) { + if (OBJ_obj2txt(buf, sizeof(buf), sk_ASN1_OBJECT_value(eku.get(), i), 1) >= + 0) { + callback(buf); + } + } + return true; +} + +bool X509View::ifRsa(KeyCallback callback) const { + if (cert_ == nullptr) return true; + OSSL3_CONST EVP_PKEY* pkey = X509_get0_pubkey(cert_); + auto id = EVP_PKEY_id(pkey); + if (id == EVP_PKEY_RSA || id == EVP_PKEY_RSA2 || id == EVP_PKEY_RSA_PSS) { + Rsa rsa(EVP_PKEY_get0_RSA(pkey)); + if (!rsa) [[unlikely]] + return true; + return callback(rsa); + } + return true; +} + +bool X509View::ifEc(KeyCallback callback) const { + if (cert_ == nullptr) return true; + OSSL3_CONST EVP_PKEY* pkey = X509_get0_pubkey(cert_); + auto id = 
EVP_PKEY_id(pkey); + if (id == EVP_PKEY_EC) { + Ec ec(EVP_PKEY_get0_EC_KEY(pkey)); + if (!ec) [[unlikely]] + return true; + return callback(ec); + } + return true; +} + X509Pointer X509Pointer::IssuerFrom(const SSLPointer& ssl, const X509View& view) { return IssuerFrom(SSL_get_SSL_CTX(ssl.get()), view); @@ -1493,7 +1569,7 @@ DataPointer DHPointer::stateless(const EVPKeyPointer& ourKey, size_t out_size; if (!ourKey || !theirKey) return {}; - EVPKeyCtxPointer ctx(EVP_PKEY_CTX_new(ourKey.get(), nullptr)); + auto ctx = EVPKeyCtxPointer::New(ourKey); if (!ctx || EVP_PKEY_derive_init(ctx.get()) <= 0 || EVP_PKEY_derive_set_peer(ctx.get(), theirKey.get()) <= 0 || EVP_PKEY_derive(ctx.get(), nullptr, &out_size) <= 0) { @@ -1522,9 +1598,18 @@ DataPointer DHPointer::stateless(const EVPKeyPointer& ourKey, // KDF const EVP_MD* getDigestByName(const std::string_view name) { + // Historically, "dss1" and "DSS1" were DSA aliases for SHA-1 + // exposed through the public API. + if (name == "dss1" || name == "DSS1") [[unlikely]] { + return EVP_sha1(); + } return EVP_get_digestbyname(name.data()); } +const EVP_CIPHER* getCipherByName(const std::string_view name) { + return EVP_get_cipherbyname(name.data()); +} + bool checkHkdfLength(const EVP_MD* md, size_t length) { // HKDF-Expand computes up to 255 HMAC blocks, each having as many bits as // the output of the hash function. 255 is a hard limit because HKDF appends @@ -1547,8 +1632,7 @@ DataPointer hkdf(const EVP_MD* md, return {}; } - EVPKeyCtxPointer ctx = - EVPKeyCtxPointer(EVP_PKEY_CTX_new_id(EVP_PKEY_HKDF, nullptr)); + auto ctx = EVPKeyCtxPointer::NewFromID(EVP_PKEY_HKDF); if (!ctx || !EVP_PKEY_derive_init(ctx.get()) || !EVP_PKEY_CTX_set_hkdf_md(ctx.get(), md) || !EVP_PKEY_CTX_add1_hkdf_info(ctx.get(), info.data, info.len)) { @@ -1704,6 +1788,26 @@ EVPKeyPointer EVPKeyPointer::NewRawPrivate( EVP_PKEY_new_raw_private_key(id, nullptr, data.data, data.len)); } +EVPKeyPointer EVPKeyPointer::NewDH(DHPointer&& dh) { + if (!dh) return {}; + auto key = New(); + if (!key) return {}; + if (EVP_PKEY_assign_DH(key.get(), dh.get())) { + dh.release(); + } + return key; +} + +EVPKeyPointer EVPKeyPointer::NewRSA(RSAPointer&& rsa) { + if (!rsa) return {}; + auto key = New(); + if (!key) return {}; + if (EVP_PKEY_assign_RSA(key.get(), rsa.get())) { + rsa.release(); + } + return key; +} + EVPKeyPointer::EVPKeyPointer(EVP_PKEY* pkey) : pkey_(pkey) {} EVPKeyPointer::EVPKeyPointer(EVPKeyPointer&& other) noexcept @@ -1757,7 +1861,7 @@ size_t EVPKeyPointer::size() const { EVPKeyCtxPointer EVPKeyPointer::newCtx() const { if (!pkey_) return {}; - return EVPKeyCtxPointer(EVP_PKEY_CTX_new(get(), nullptr)); + return EVPKeyCtxPointer::New(*this); } size_t EVPKeyPointer::rawPublicKeySize() const { @@ -2230,6 +2334,84 @@ Result EVPKeyPointer::writePublicKey( return bio; } +bool EVPKeyPointer::isRsaVariant() const { + if (!pkey_) return false; + int type = id(); + return type == EVP_PKEY_RSA || type == EVP_PKEY_RSA2 || + type == EVP_PKEY_RSA_PSS; +} + +bool EVPKeyPointer::isOneShotVariant() const { + if (!pkey_) return false; + int type = id(); + return type == EVP_PKEY_ED25519 || type == EVP_PKEY_ED448; +} + +bool EVPKeyPointer::isSigVariant() const { + if (!pkey_) return false; + int type = id(); + return type == EVP_PKEY_EC || type == EVP_PKEY_DSA; +} + +int EVPKeyPointer::getDefaultSignPadding() const { + return id() == EVP_PKEY_RSA_PSS ? 
RSA_PKCS1_PSS_PADDING : RSA_PKCS1_PADDING; +} + +std::optional EVPKeyPointer::getBytesOfRS() const { + if (!pkey_) return std::nullopt; + int bits, id = base_id(); + + if (id == EVP_PKEY_DSA) { + const DSA* dsa_key = EVP_PKEY_get0_DSA(get()); + // Both r and s are computed mod q, so their width is limited by that of q. + bits = BignumPointer::GetBitCount(DSA_get0_q(dsa_key)); + } else if (id == EVP_PKEY_EC) { + bits = EC_GROUP_order_bits(ECKeyPointer::GetGroup(*this)); + } else { + return std::nullopt; + } + + return (bits + 7) / 8; +} + +EVPKeyPointer::operator Rsa() const { + int type = id(); + if (type != EVP_PKEY_RSA && type != EVP_PKEY_RSA_PSS) return {}; + + // TODO(tniessen): Remove the "else" branch once we drop support for OpenSSL + // versions older than 1.1.1e via FIPS / dynamic linking. + OSSL3_CONST RSA* rsa; + if (OPENSSL_VERSION_NUMBER >= 0x1010105fL) { + rsa = EVP_PKEY_get0_RSA(get()); + } else { + rsa = static_cast(EVP_PKEY_get0(get())); + } + if (rsa == nullptr) return {}; + return Rsa(rsa); +} + +bool EVPKeyPointer::validateDsaParameters() const { + if (!pkey_) return false; + /* Validate DSA2 parameters from FIPS 186-4 */ +#if OPENSSL_VERSION_MAJOR >= 3 + if (EVP_default_properties_is_fips_enabled(nullptr) && EVP_PKEY_DSA == id()) { +#else + if (FIPS_mode() && EVP_PKEY_DSA == id()) { +#endif + const DSA* dsa = EVP_PKEY_get0_DSA(pkey_.get()); + const BIGNUM* p; + const BIGNUM* q; + DSA_get0_pqg(dsa, &p, &q, nullptr); + int L = BignumPointer::GetBitCount(p); + int N = BignumPointer::GetBitCount(q); + + return (L == 1024 && N == 160) || (L == 2048 && N == 224) || + (L == 2048 && N == 256) || (L == 3072 && N == 256); + } + + return true; +} + // ============================================================================ SSLPointer::SSLPointer(SSL* ssl) : ssl_(ssl) {} @@ -2883,4 +3065,752 @@ ECKeyPointer ECKeyPointer::New(const EC_GROUP* group) { return ptr; } +// ============================================================================ + +EVPKeyCtxPointer::EVPKeyCtxPointer() : ctx_(nullptr) {} + +EVPKeyCtxPointer::EVPKeyCtxPointer(EVP_PKEY_CTX* ctx) : ctx_(ctx) {} + +EVPKeyCtxPointer::EVPKeyCtxPointer(EVPKeyCtxPointer&& other) noexcept + : ctx_(other.release()) {} + +EVPKeyCtxPointer& EVPKeyCtxPointer::operator=( + EVPKeyCtxPointer&& other) noexcept { + ctx_.reset(other.release()); + return *this; +} + +EVPKeyCtxPointer::~EVPKeyCtxPointer() { + reset(); +} + +void EVPKeyCtxPointer::reset(EVP_PKEY_CTX* ctx) { + ctx_.reset(ctx); +} + +EVP_PKEY_CTX* EVPKeyCtxPointer::release() { + return ctx_.release(); +} + +EVPKeyCtxPointer EVPKeyCtxPointer::New(const EVPKeyPointer& key) { + if (!key) return {}; + return EVPKeyCtxPointer(EVP_PKEY_CTX_new(key.get(), nullptr)); +} + +EVPKeyCtxPointer EVPKeyCtxPointer::NewFromID(int id) { + return EVPKeyCtxPointer(EVP_PKEY_CTX_new_id(id, nullptr)); +} + +bool EVPKeyCtxPointer::initForDerive(const EVPKeyPointer& peer) { + if (!ctx_) return false; + if (EVP_PKEY_derive_init(ctx_.get()) != 1) return false; + return EVP_PKEY_derive_set_peer(ctx_.get(), peer.get()) == 1; +} + +bool EVPKeyCtxPointer::initForKeygen() { + if (!ctx_) return false; + return EVP_PKEY_keygen_init(ctx_.get()) == 1; +} + +bool EVPKeyCtxPointer::initForParamgen() { + if (!ctx_) return false; + return EVP_PKEY_paramgen_init(ctx_.get()) == 1; +} + +int EVPKeyCtxPointer::initForVerify() { + if (!ctx_) return 0; + return EVP_PKEY_verify_init(ctx_.get()); +} + +int EVPKeyCtxPointer::initForSign() { + if (!ctx_) return 0; + return EVP_PKEY_sign_init(ctx_.get()); +} + +bool 
EVPKeyCtxPointer::setDhParameters(int prime_size, uint32_t generator) { + if (!ctx_) return false; + return EVP_PKEY_CTX_set_dh_paramgen_prime_len(ctx_.get(), prime_size) == 1 && + EVP_PKEY_CTX_set_dh_paramgen_generator(ctx_.get(), generator) == 1; +} + +bool EVPKeyCtxPointer::setDsaParameters(uint32_t bits, + std::optional q_bits) { + if (!ctx_) return false; + if (EVP_PKEY_CTX_set_dsa_paramgen_bits(ctx_.get(), bits) != 1) { + return false; + } + if (q_bits.has_value() && + EVP_PKEY_CTX_set_dsa_paramgen_q_bits(ctx_.get(), q_bits.value()) != 1) { + return false; + } + return true; +} + +bool EVPKeyCtxPointer::setEcParameters(int curve, int encoding) { + if (!ctx_) return false; + return EVP_PKEY_CTX_set_ec_paramgen_curve_nid(ctx_.get(), curve) == 1 && + EVP_PKEY_CTX_set_ec_param_enc(ctx_.get(), encoding) == 1; +} + +bool EVPKeyCtxPointer::setRsaOaepMd(const EVP_MD* md) { + if (md == nullptr || !ctx_) return false; + return EVP_PKEY_CTX_set_rsa_oaep_md(ctx_.get(), md) > 0; +} + +bool EVPKeyCtxPointer::setRsaMgf1Md(const EVP_MD* md) { + if (md == nullptr || !ctx_) return false; + return EVP_PKEY_CTX_set_rsa_mgf1_md(ctx_.get(), md) > 0; +} + +bool EVPKeyCtxPointer::setRsaPadding(int padding) { + return setRsaPadding(ctx_.get(), padding, std::nullopt); +} + +bool EVPKeyCtxPointer::setRsaPadding(EVP_PKEY_CTX* ctx, + int padding, + std::optional salt_len) { + if (ctx == nullptr) return false; + if (EVP_PKEY_CTX_set_rsa_padding(ctx, padding) <= 0) { + return false; + } + if (padding == RSA_PKCS1_PSS_PADDING && salt_len.has_value()) { + return EVP_PKEY_CTX_set_rsa_pss_saltlen(ctx, salt_len.value()) > 0; + } + return true; +} + +bool EVPKeyCtxPointer::setRsaKeygenBits(int bits) { + if (!ctx_) return false; + return EVP_PKEY_CTX_set_rsa_keygen_bits(ctx_.get(), bits) == 1; +} + +bool EVPKeyCtxPointer::setRsaKeygenPubExp(BignumPointer&& e) { + if (!ctx_) return false; + if (EVP_PKEY_CTX_set_rsa_keygen_pubexp(ctx_.get(), e.get()) == 1) { + // The ctx_ takes ownership of e on success. + e.release(); + return true; + } + return false; +} + +bool EVPKeyCtxPointer::setRsaPssKeygenMd(const EVP_MD* md) { + if (md == nullptr || !ctx_) return false; + return EVP_PKEY_CTX_set_rsa_pss_keygen_md(ctx_.get(), md) > 0; +} + +bool EVPKeyCtxPointer::setRsaPssKeygenMgf1Md(const EVP_MD* md) { + if (md == nullptr || !ctx_) return false; + return EVP_PKEY_CTX_set_rsa_pss_keygen_mgf1_md(ctx_.get(), md) > 0; +} + +bool EVPKeyCtxPointer::setRsaPssSaltlen(int salt_len) { + if (!ctx_) return false; + return EVP_PKEY_CTX_set_rsa_pss_keygen_saltlen(ctx_.get(), salt_len) > 0; +} + +bool EVPKeyCtxPointer::setRsaImplicitRejection() { + if (!ctx_) return false; + return EVP_PKEY_CTX_ctrl_str( + ctx_.get(), "rsa_pkcs1_implicit_rejection", "1") > 0; + // From the doc -2 means that the option is not supported. + // The default for the option is enabled and if it has been + // specifically disabled we want to respect that so we will + // not throw an error if the option is supported regardless + // of how it is set. The call to set the value + // will not affect what is used since a different context is + // used in the call if the option is supported +} + +bool EVPKeyCtxPointer::setRsaOaepLabel(DataPointer&& data) { + if (!ctx_) return false; + if (EVP_PKEY_CTX_set0_rsa_oaep_label(ctx_.get(), + static_cast(data.get()), + data.size()) > 0) { + // The ctx_ takes ownership of data on success. 
+ data.release(); + return true; + } + return false; +} + +bool EVPKeyCtxPointer::setSignatureMd(const EVPMDCtxPointer& md) { + if (!ctx_) return false; + return EVP_PKEY_CTX_set_signature_md(ctx_.get(), EVP_MD_CTX_md(md.get())) == + 1; +} + +bool EVPKeyCtxPointer::initForEncrypt() { + if (!ctx_) return false; + return EVP_PKEY_encrypt_init(ctx_.get()) == 1; +} + +bool EVPKeyCtxPointer::initForDecrypt() { + if (!ctx_) return false; + return EVP_PKEY_decrypt_init(ctx_.get()) == 1; +} + +DataPointer EVPKeyCtxPointer::derive() const { + if (!ctx_) return {}; + size_t len = 0; + if (EVP_PKEY_derive(ctx_.get(), nullptr, &len) != 1) return {}; + auto data = DataPointer::Alloc(len); + if (!data) return {}; + if (EVP_PKEY_derive( + ctx_.get(), static_cast(data.get()), &len) != 1) { + return {}; + } + return data; +} + +EVPKeyPointer EVPKeyCtxPointer::paramgen() const { + if (!ctx_) return {}; + EVP_PKEY* key = nullptr; + if (EVP_PKEY_paramgen(ctx_.get(), &key) != 1) return {}; + return EVPKeyPointer(key); +} + +bool EVPKeyCtxPointer::publicCheck() const { + if (!ctx_) return false; +#if OPENSSL_VERSION_MAJOR >= 3 + return EVP_PKEY_public_check_quick(ctx_.get()) == 1; +#else + return EVP_PKEY_public_check(ctx_.get()) == 1; +#endif +} + +bool EVPKeyCtxPointer::privateCheck() const { + if (!ctx_) return false; + return EVP_PKEY_check(ctx_.get()) == 1; +} + +bool EVPKeyCtxPointer::verify(const Buffer& sig, + const Buffer& data) { + if (!ctx_) return false; + return EVP_PKEY_verify(ctx_.get(), sig.data, sig.len, data.data, data.len) == + 1; +} + +DataPointer EVPKeyCtxPointer::sign(const Buffer& data) { + if (!ctx_) return {}; + size_t len = 0; + if (EVP_PKEY_sign(ctx_.get(), nullptr, &len, data.data, data.len) != 1) { + return {}; + } + auto buf = DataPointer::Alloc(len); + if (!buf) return {}; + if (EVP_PKEY_sign(ctx_.get(), + static_cast(buf.get()), + &len, + data.data, + data.len) != 1) { + return {}; + } + return buf.resize(len); +} + +bool EVPKeyCtxPointer::signInto(const Buffer& data, + Buffer* sig) { + if (!ctx_) return false; + size_t len = sig->len; + if (EVP_PKEY_sign(ctx_.get(), sig->data, &len, data.data, data.len) != 1) { + return false; + } + sig->len = len; + return true; +} + +// ============================================================================ + +namespace { + +using EVP_PKEY_cipher_init_t = int(EVP_PKEY_CTX* ctx); +using EVP_PKEY_cipher_t = int(EVP_PKEY_CTX* ctx, + unsigned char* out, + size_t* outlen, + const unsigned char* in, + size_t inlen); + +template +DataPointer RSA_Cipher(const EVPKeyPointer& key, + const Rsa::CipherParams& params, + const Buffer in) { + if (!key) return {}; + EVPKeyCtxPointer ctx = key.newCtx(); + + if (!ctx || init(ctx.get()) <= 0 || !ctx.setRsaPadding(params.padding) || + (params.digest != nullptr && (!ctx.setRsaOaepMd(params.digest) || + !ctx.setRsaMgf1Md(params.digest)))) { + return {}; + } + + if (params.label.len != 0 && params.label.data != nullptr && + !ctx.setRsaOaepLabel(DataPointer::Copy(params.label))) { + return {}; + } + + size_t out_len = 0; + if (cipher(ctx.get(), + nullptr, + &out_len, + reinterpret_cast(in.data), + in.len) <= 0) { + return {}; + } + + auto buf = DataPointer::Alloc(out_len); + if (!buf) return {}; + + if (cipher(ctx.get(), + static_cast(buf.get()), + &out_len, + static_cast(in.data), + in.len) <= 0) { + return {}; + } + + return buf.resize(out_len); +} + +template +DataPointer CipherImpl(const EVPKeyPointer& key, + const Rsa::CipherParams& params, + const Buffer in) { + if (!key) return {}; + EVPKeyCtxPointer ctx 
= key.newCtx(); + if (!ctx || init(ctx.get()) <= 0 || !ctx.setRsaPadding(params.padding) || + (params.digest != nullptr && !ctx.setRsaOaepMd(params.digest))) { + return {}; + } + + if (params.label.len != 0 && params.label.data != nullptr && + !ctx.setRsaOaepLabel(DataPointer::Copy(params.label))) { + return {}; + } + + size_t out_len = 0; + if (cipher(ctx.get(), + nullptr, + &out_len, + static_cast(in.data), + in.len) <= 0) { + return {}; + } + + auto buf = DataPointer::Alloc(out_len); + if (!buf) return {}; + + if (cipher(ctx.get(), + static_cast(buf.get()), + &out_len, + static_cast(in.data), + in.len) <= 0) { + return {}; + } + + return buf.resize(out_len); +} +} // namespace + +Rsa::Rsa() : rsa_(nullptr) {} + +Rsa::Rsa(OSSL3_CONST RSA* ptr) : rsa_(ptr) {} + +const Rsa::PublicKey Rsa::getPublicKey() const { + if (rsa_ == nullptr) return {}; + PublicKey key; + RSA_get0_key(rsa_, &key.n, &key.e, &key.d); + return key; +} + +const Rsa::PrivateKey Rsa::getPrivateKey() const { + if (rsa_ == nullptr) return {}; + PrivateKey key; + RSA_get0_factors(rsa_, &key.p, &key.q); + RSA_get0_crt_params(rsa_, &key.dp, &key.dq, &key.qi); + return key; +} + +const std::optional Rsa::getPssParams() const { + if (rsa_ == nullptr) return std::nullopt; + const RSA_PSS_PARAMS* params = RSA_get0_pss_params(rsa_); + if (params == nullptr) return std::nullopt; + Rsa::PssParams ret{ + .digest = OBJ_nid2ln(NID_sha1), + .mgf1_digest = OBJ_nid2ln(NID_sha1), + .salt_length = 20, + }; + + if (params->hashAlgorithm != nullptr) { + const ASN1_OBJECT* hash_obj; + X509_ALGOR_get0(&hash_obj, nullptr, nullptr, params->hashAlgorithm); + ret.digest = OBJ_nid2ln(OBJ_obj2nid(hash_obj)); + } + + if (params->maskGenAlgorithm != nullptr) { + const ASN1_OBJECT* mgf_obj; + X509_ALGOR_get0(&mgf_obj, nullptr, nullptr, params->maskGenAlgorithm); + int mgf_nid = OBJ_obj2nid(mgf_obj); + if (mgf_nid == NID_mgf1) { + const ASN1_OBJECT* mgf1_hash_obj; + X509_ALGOR_get0(&mgf1_hash_obj, nullptr, nullptr, params->maskHash); + ret.mgf1_digest = OBJ_nid2ln(OBJ_obj2nid(mgf1_hash_obj)); + } + } + + if (params->saltLength != nullptr) { + if (ASN1_INTEGER_get_int64(&ret.salt_length, params->saltLength) != 1) { + return std::nullopt; + } + } + return ret; +} + +bool Rsa::setPublicKey(BignumPointer&& n, BignumPointer&& e) { + if (!n || !e) return false; + if (RSA_set0_key(const_cast(rsa_), n.get(), e.get(), nullptr) == 1) { + n.release(); + e.release(); + return true; + } + return false; +} + +bool Rsa::setPrivateKey(BignumPointer&& d, + BignumPointer&& q, + BignumPointer&& p, + BignumPointer&& dp, + BignumPointer&& dq, + BignumPointer&& qi) { + if (!RSA_set0_key(const_cast(rsa_), nullptr, nullptr, d.get())) { + return false; + } + d.release(); + + if (!RSA_set0_factors(const_cast(rsa_), p.get(), q.get())) { + return false; + } + p.release(); + q.release(); + + if (!RSA_set0_crt_params( + const_cast(rsa_), dp.get(), dq.get(), qi.get())) { + return false; + } + dp.release(); + dq.release(); + qi.release(); + return true; +} + +DataPointer Rsa::encrypt(const EVPKeyPointer& key, + const Rsa::CipherParams& params, + const Buffer in) { + if (!key) return {}; + return RSA_Cipher(key, params, in); +} + +DataPointer Rsa::decrypt(const EVPKeyPointer& key, + const Rsa::CipherParams& params, + const Buffer in) { + if (!key) return {}; + return RSA_Cipher(key, params, in); +} + +DataPointer Cipher::encrypt(const EVPKeyPointer& key, + const CipherParams& params, + const Buffer in) { + // public operation + return CipherImpl(key, params, in); +} + +DataPointer 
Cipher::decrypt(const EVPKeyPointer& key, + const CipherParams& params, + const Buffer in) { + // private operation + return CipherImpl(key, params, in); +} + +DataPointer Cipher::sign(const EVPKeyPointer& key, + const CipherParams& params, + const Buffer in) { + // private operation + return CipherImpl(key, params, in); +} + +DataPointer Cipher::recover(const EVPKeyPointer& key, + const CipherParams& params, + const Buffer in) { + // public operation + return CipherImpl( + key, params, in); +} + +// ============================================================================ + +Ec::Ec() : ec_(nullptr) {} + +Ec::Ec(OSSL3_CONST EC_KEY* key) : ec_(key) {} + +const EC_GROUP* Ec::getGroup() const { + return ECKeyPointer::GetGroup(ec_); +} + +int Ec::getCurve() const { + return EC_GROUP_get_curve_name(getGroup()); +} + +// ============================================================================ + +EVPMDCtxPointer::EVPMDCtxPointer() : ctx_(nullptr) {} + +EVPMDCtxPointer::EVPMDCtxPointer(EVP_MD_CTX* ctx) : ctx_(ctx) {} + +EVPMDCtxPointer::EVPMDCtxPointer(EVPMDCtxPointer&& other) noexcept + : ctx_(other.release()) {} + +EVPMDCtxPointer& EVPMDCtxPointer::operator=(EVPMDCtxPointer&& other) noexcept { + ctx_.reset(other.release()); + return *this; +} + +EVPMDCtxPointer::~EVPMDCtxPointer() { + reset(); +} + +void EVPMDCtxPointer::reset(EVP_MD_CTX* ctx) { + ctx_.reset(ctx); +} + +EVP_MD_CTX* EVPMDCtxPointer::release() { + return ctx_.release(); +} + +bool EVPMDCtxPointer::digestInit(const EVP_MD* digest) { + if (!ctx_) return false; + return EVP_DigestInit_ex(ctx_.get(), digest, nullptr) > 0; +} + +bool EVPMDCtxPointer::digestUpdate(const Buffer& in) { + if (!ctx_) return false; + return EVP_DigestUpdate(ctx_.get(), in.data, in.len) > 0; +} + +DataPointer EVPMDCtxPointer::digestFinal(size_t length) { + if (!ctx_) return {}; + + auto buf = DataPointer::Alloc(length); + if (!buf) return {}; + + Buffer buffer = buf; + + if (!digestFinalInto(&buffer)) [[unlikely]] { + return {}; + } + + return buf; +} + +bool EVPMDCtxPointer::digestFinalInto(Buffer* buf) { + if (!ctx_) return false; + + auto ptr = static_cast(buf->data); + + int ret = (buf->len == getExpectedSize()) + ? 
EVP_DigestFinal_ex(ctx_.get(), ptr, nullptr) + : EVP_DigestFinalXOF(ctx_.get(), ptr, buf->len); + + if (ret != 1) [[unlikely]] + return false; + + return true; +} + +size_t EVPMDCtxPointer::getExpectedSize() { + if (!ctx_) return 0; + return EVP_MD_CTX_size(ctx_.get()); +} + +size_t EVPMDCtxPointer::getDigestSize() const { + return EVP_MD_size(getDigest()); +} + +const EVP_MD* EVPMDCtxPointer::getDigest() const { + if (!ctx_) return nullptr; + return EVP_MD_CTX_md(ctx_.get()); +} + +bool EVPMDCtxPointer::hasXofFlag() const { + if (!ctx_) return false; + return (EVP_MD_flags(getDigest()) & EVP_MD_FLAG_XOF) == EVP_MD_FLAG_XOF; +} + +bool EVPMDCtxPointer::copyTo(const EVPMDCtxPointer& other) const { + if (!ctx_ || !other) return {}; + if (EVP_MD_CTX_copy(other.get(), ctx_.get()) != 1) return false; + return true; +} + +std::optional EVPMDCtxPointer::signInit(const EVPKeyPointer& key, + const EVP_MD* digest) { + EVP_PKEY_CTX* ctx = nullptr; + if (!EVP_DigestSignInit(ctx_.get(), &ctx, digest, nullptr, key.get())) { + return std::nullopt; + } + return ctx; +} + +std::optional EVPMDCtxPointer::verifyInit( + const EVPKeyPointer& key, const EVP_MD* digest) { + EVP_PKEY_CTX* ctx = nullptr; + if (!EVP_DigestVerifyInit(ctx_.get(), &ctx, digest, nullptr, key.get())) { + return std::nullopt; + } + return ctx; +} + +DataPointer EVPMDCtxPointer::signOneShot( + const Buffer& buf) const { + if (!ctx_) return {}; + size_t len; + if (!EVP_DigestSign(ctx_.get(), nullptr, &len, buf.data, buf.len)) { + return {}; + } + auto data = DataPointer::Alloc(len); + if (!data) [[unlikely]] + return {}; + + if (!EVP_DigestSign(ctx_.get(), + static_cast(data.get()), + &len, + buf.data, + buf.len)) { + return {}; + } + return data; +} + +DataPointer EVPMDCtxPointer::sign( + const Buffer& buf) const { + if (!ctx_) [[unlikely]] + return {}; + size_t len; + if (!EVP_DigestSignUpdate(ctx_.get(), buf.data, buf.len) || + !EVP_DigestSignFinal(ctx_.get(), nullptr, &len)) { + return {}; + } + auto data = DataPointer::Alloc(len); + if (!data) [[unlikely]] + return {}; + if (!EVP_DigestSignFinal( + ctx_.get(), static_cast(data.get()), &len)) { + return {}; + } + return data.resize(len); +} + +bool EVPMDCtxPointer::verify(const Buffer& buf, + const Buffer& sig) const { + if (!ctx_) return false; + int ret = EVP_DigestVerify(ctx_.get(), sig.data, sig.len, buf.data, buf.len); + return ret == 1; +} + +EVPMDCtxPointer EVPMDCtxPointer::New() { + return EVPMDCtxPointer(EVP_MD_CTX_new()); +} + +// ============================================================================ + +bool extractP1363(const Buffer& buf, + unsigned char* dest, + size_t n) { + auto asn1_sig = ECDSASigPointer::Parse(buf); + if (!asn1_sig) return false; + + return BignumPointer::EncodePaddedInto(asn1_sig.r(), dest, n) > 0 && + BignumPointer::EncodePaddedInto(asn1_sig.s(), dest + n, n) > 0; +} + +// ============================================================================ + +HMACCtxPointer::HMACCtxPointer() : ctx_(nullptr) {} + +HMACCtxPointer::HMACCtxPointer(HMAC_CTX* ctx) : ctx_(ctx) {} + +HMACCtxPointer::HMACCtxPointer(HMACCtxPointer&& other) noexcept + : ctx_(other.release()) {} + +HMACCtxPointer& HMACCtxPointer::operator=(HMACCtxPointer&& other) noexcept { + ctx_.reset(other.release()); + return *this; +} + +HMACCtxPointer::~HMACCtxPointer() { + reset(); +} + +void HMACCtxPointer::reset(HMAC_CTX* ctx) { + ctx_.reset(ctx); +} + +HMAC_CTX* HMACCtxPointer::release() { + return ctx_.release(); +} + +bool HMACCtxPointer::init(const Buffer& buf, const EVP_MD* md) { + if 
(!ctx_) return false; + return HMAC_Init_ex(ctx_.get(), buf.data, buf.len, md, nullptr) == 1; +} + +bool HMACCtxPointer::update(const Buffer& buf) { + if (!ctx_) return false; + return HMAC_Update(ctx_.get(), + static_cast(buf.data), + buf.len) == 1; +} + +DataPointer HMACCtxPointer::digest() { + auto data = DataPointer::Alloc(EVP_MAX_MD_SIZE); + if (!data) return {}; + Buffer buf = data; + if (!digestInto(&buf)) return {}; + return data.resize(buf.len); +} + +bool HMACCtxPointer::digestInto(Buffer* buf) { + if (!ctx_) return false; + + unsigned int len = buf->len; + if (!HMAC_Final(ctx_.get(), static_cast(buf->data), &len)) { + return false; + } + buf->len = len; + return true; +} + +HMACCtxPointer HMACCtxPointer::New() { + return HMACCtxPointer(HMAC_CTX_new()); +} + +DataPointer hashDigest(const Buffer& buf, + const EVP_MD* md) { + if (md == nullptr) return {}; + size_t md_len = EVP_MD_size(md); + unsigned int result_size; + auto data = DataPointer::Alloc(md_len); + if (!data) return {}; + + if (!EVP_Digest(buf.data, + buf.len, + reinterpret_cast(data.get()), + &result_size, + md, + nullptr)) { + return {}; + } + + return data.resize(result_size); +} + } // namespace ncrypto diff --git a/deps/ncrypto/ncrypto.h b/deps/ncrypto/ncrypto.h index e5bf2b529bf239..75ac9fd8d705aa 100644 --- a/deps/ncrypto/ncrypto.h +++ b/deps/ncrypto/ncrypto.h @@ -201,18 +201,28 @@ struct FunctionDeleter { template using DeleteFnPtr = typename FunctionDeleter::Pointer; -using BignumCtxPointer = DeleteFnPtr; -using BignumGenCallbackPointer = DeleteFnPtr; -using EVPKeyCtxPointer = DeleteFnPtr; -using EVPMDCtxPointer = DeleteFnPtr; -using HMACCtxPointer = DeleteFnPtr; -using NetscapeSPKIPointer = DeleteFnPtr; using PKCS8Pointer = DeleteFnPtr; using RSAPointer = DeleteFnPtr; using SSLSessionPointer = DeleteFnPtr; +class BIOPointer; +class BignumPointer; class CipherCtxPointer; +class DataPointer; +class DHPointer; class ECKeyPointer; +class EVPKeyPointer; +class EVPMDCtxPointer; +class SSLCtxPointer; +class SSLPointer; +class X509View; +class X509Pointer; +class ECDSASigPointer; +class ECGroupPointer; +class ECPointPointer; +class ECKeyPointer; +class Rsa; +class Ec; struct StackOfXASN1Deleter { void operator()(STACK_OF(ASN1_OBJECT) * p) const { @@ -228,6 +238,9 @@ struct Buffer { size_t len = 0; }; +DataPointer hashDigest(const Buffer& data, + const EVP_MD* md); + class Cipher final { public: Cipher() = default; @@ -258,15 +271,108 @@ class Cipher final { static const Cipher FromNid(int nid); static const Cipher FromCtx(const CipherCtxPointer& ctx); + struct CipherParams { + int padding; + const EVP_MD* digest; + const Buffer label; + }; + + static DataPointer encrypt(const EVPKeyPointer& key, + const CipherParams& params, + const Buffer in); + static DataPointer decrypt(const EVPKeyPointer& key, + const CipherParams& params, + const Buffer in); + + static DataPointer sign(const EVPKeyPointer& key, + const CipherParams& params, + const Buffer in); + + static DataPointer recover(const EVPKeyPointer& key, + const CipherParams& params, + const Buffer in); + private: const EVP_CIPHER* cipher_ = nullptr; }; +// ============================================================================ +// RSA + +class Rsa final { + public: + Rsa(); + Rsa(OSSL3_CONST RSA* rsa); + NCRYPTO_DISALLOW_COPY_AND_MOVE(Rsa) + + inline operator bool() const { return rsa_ != nullptr; } + inline operator OSSL3_CONST RSA*() const { return rsa_; } + + struct PublicKey { + const BIGNUM* n; + const BIGNUM* e; + const BIGNUM* d; + }; + struct PrivateKey 
{ + const BIGNUM* p; + const BIGNUM* q; + const BIGNUM* dp; + const BIGNUM* dq; + const BIGNUM* qi; + }; + struct PssParams { + std::string_view digest = "sha1"; + std::optional mgf1_digest = "sha1"; + int64_t salt_length = 20; + }; + + const PublicKey getPublicKey() const; + const PrivateKey getPrivateKey() const; + const std::optional getPssParams() const; + + bool setPublicKey(BignumPointer&& n, BignumPointer&& e); + bool setPrivateKey(BignumPointer&& d, + BignumPointer&& q, + BignumPointer&& p, + BignumPointer&& dp, + BignumPointer&& dq, + BignumPointer&& qi); + + using CipherParams = Cipher::CipherParams; + + static DataPointer encrypt(const EVPKeyPointer& key, + const CipherParams& params, + const Buffer in); + static DataPointer decrypt(const EVPKeyPointer& key, + const CipherParams& params, + const Buffer in); + + private: + OSSL3_CONST RSA* rsa_; +}; + +class Ec final { + public: + Ec(); + Ec(OSSL3_CONST EC_KEY* key); + NCRYPTO_DISALLOW_COPY_AND_MOVE(Ec) + + const EC_GROUP* getGroup() const; + int getCurve() const; + + inline operator bool() const { return ec_ != nullptr; } + inline operator OSSL3_CONST EC_KEY*() const { return ec_; } + + private: + OSSL3_CONST EC_KEY* ec_ = nullptr; +}; + // A managed pointer to a buffer of data. When destroyed the underlying // buffer will be freed. class DataPointer final { public: static DataPointer Alloc(size_t len); + static DataPointer Copy(const Buffer& buffer); DataPointer() = default; explicit DataPointer(void* data, size_t len); @@ -283,6 +389,11 @@ class DataPointer final { void reset(void* data = nullptr, size_t len = 0); void reset(const Buffer& buffer); + // Sets the underlying data buffer to all zeros. + void zero(); + + DataPointer resize(size_t len); + // Releases ownership of the underlying data buffer. It is the caller's // responsibility to ensure the buffer is appropriately freed. 
Buffer release(); @@ -471,6 +582,74 @@ class CipherCtxPointer final { DeleteFnPtr ctx_; }; +class EVPKeyCtxPointer final { + public: + EVPKeyCtxPointer(); + explicit EVPKeyCtxPointer(EVP_PKEY_CTX* ctx); + EVPKeyCtxPointer(EVPKeyCtxPointer&& other) noexcept; + EVPKeyCtxPointer& operator=(EVPKeyCtxPointer&& other) noexcept; + NCRYPTO_DISALLOW_COPY(EVPKeyCtxPointer) + ~EVPKeyCtxPointer(); + + inline bool operator==(std::nullptr_t) const noexcept { + return ctx_ == nullptr; + } + inline operator bool() const { return ctx_ != nullptr; } + inline EVP_PKEY_CTX* get() const { return ctx_.get(); } + void reset(EVP_PKEY_CTX* ctx = nullptr); + EVP_PKEY_CTX* release(); + + bool initForDerive(const EVPKeyPointer& peer); + DataPointer derive() const; + + bool initForParamgen(); + bool setDhParameters(int prime_size, uint32_t generator); + bool setDsaParameters(uint32_t bits, std::optional q_bits); + bool setEcParameters(int curve, int encoding); + + bool setRsaOaepMd(const EVP_MD* md); + bool setRsaMgf1Md(const EVP_MD* md); + bool setRsaPadding(int padding); + bool setRsaKeygenPubExp(BignumPointer&& e); + bool setRsaKeygenBits(int bits); + bool setRsaPssKeygenMd(const EVP_MD* md); + bool setRsaPssKeygenMgf1Md(const EVP_MD* md); + bool setRsaPssSaltlen(int salt_len); + bool setRsaImplicitRejection(); + bool setRsaOaepLabel(DataPointer&& data); + + bool setSignatureMd(const EVPMDCtxPointer& md); + + bool publicCheck() const; + bool privateCheck() const; + + bool verify(const Buffer& sig, + const Buffer& data); + DataPointer sign(const Buffer& data); + bool signInto(const Buffer& data, + Buffer* sig); + + static constexpr int kDefaultRsaExponent = 0x10001; + + static bool setRsaPadding(EVP_PKEY_CTX* ctx, + int padding, + std::optional salt_len = std::nullopt); + + EVPKeyPointer paramgen() const; + + bool initForEncrypt(); + bool initForDecrypt(); + bool initForKeygen(); + int initForVerify(); + int initForSign(); + + static EVPKeyCtxPointer New(const EVPKeyPointer& key); + static EVPKeyCtxPointer NewFromID(int id); + + private: + DeleteFnPtr ctx_; +}; + class EVPKeyPointer final { public: static EVPKeyPointer New(); @@ -478,6 +657,8 @@ class EVPKeyPointer final { const Buffer& data); static EVPKeyPointer NewRawPrivate(int id, const Buffer& data); + static EVPKeyPointer NewDH(DHPointer&& dh); + static EVPKeyPointer NewRSA(RSAPointer&& rsa); enum class PKEncodingType { // RSAPublicKey / RSAPrivateKey according to PKCS#1. 
@@ -578,6 +759,15 @@ class EVPKeyPointer final { static bool IsRSAPrivateKey(const Buffer& buffer); + std::optional getBytesOfRS() const; + int getDefaultSignPadding() const; + operator Rsa() const; + + bool isRsaVariant() const; + bool isOneShotVariant() const; + bool isSigVariant() const; + bool validateDsaParameters() const; + private: DeleteFnPtr pkey_; }; @@ -663,9 +853,6 @@ struct StackOfX509Deleter { }; using StackOfX509 = std::unique_ptr; -class X509Pointer; -class X509View; - class SSLCtxPointer final { public: SSLCtxPointer() = default; @@ -792,6 +979,14 @@ class X509View final { CheckMatch checkEmail(const std::string_view email, int flags) const; CheckMatch checkIp(const std::string_view ip, int flags) const; + using UsageCallback = std::function; + bool enumUsages(UsageCallback callback) const; + + template + using KeyCallback = std::function; + bool ifRsa(KeyCallback callback) const; + bool ifEc(KeyCallback callback) const; + private: const X509* cert_ = nullptr; }; @@ -948,6 +1143,77 @@ class ECKeyPointer final { DeleteFnPtr key_; }; +class EVPMDCtxPointer final { + public: + EVPMDCtxPointer(); + explicit EVPMDCtxPointer(EVP_MD_CTX* ctx); + EVPMDCtxPointer(EVPMDCtxPointer&& other) noexcept; + EVPMDCtxPointer& operator=(EVPMDCtxPointer&& other) noexcept; + NCRYPTO_DISALLOW_COPY(EVPMDCtxPointer) + ~EVPMDCtxPointer(); + + inline bool operator==(std::nullptr_t) noexcept { return ctx_ == nullptr; } + inline operator bool() const { return ctx_ != nullptr; } + inline EVP_MD_CTX* get() const { return ctx_.get(); } + inline operator EVP_MD_CTX*() const { return ctx_.get(); } + void reset(EVP_MD_CTX* ctx = nullptr); + EVP_MD_CTX* release(); + + bool digestInit(const EVP_MD* digest); + bool digestUpdate(const Buffer& in); + DataPointer digestFinal(size_t length); + bool digestFinalInto(Buffer* buf); + size_t getExpectedSize(); + + std::optional signInit(const EVPKeyPointer& key, + const EVP_MD* digest); + std::optional verifyInit(const EVPKeyPointer& key, + const EVP_MD* digest); + + DataPointer signOneShot(const Buffer& buf) const; + DataPointer sign(const Buffer& buf) const; + bool verify(const Buffer& buf, + const Buffer& sig) const; + + const EVP_MD* getDigest() const; + size_t getDigestSize() const; + bool hasXofFlag() const; + + bool copyTo(const EVPMDCtxPointer& other) const; + + static EVPMDCtxPointer New(); + + private: + DeleteFnPtr ctx_; +}; + +class HMACCtxPointer final { + public: + HMACCtxPointer(); + explicit HMACCtxPointer(HMAC_CTX* ctx); + HMACCtxPointer(HMACCtxPointer&& other) noexcept; + HMACCtxPointer& operator=(HMACCtxPointer&& other) noexcept; + NCRYPTO_DISALLOW_COPY(HMACCtxPointer) + ~HMACCtxPointer(); + + inline bool operator==(std::nullptr_t) noexcept { return ctx_ == nullptr; } + inline operator bool() const { return ctx_ != nullptr; } + inline HMAC_CTX* get() const { return ctx_.get(); } + inline operator HMAC_CTX*() const { return ctx_.get(); } + void reset(HMAC_CTX* ctx = nullptr); + HMAC_CTX* release(); + + bool init(const Buffer& buf, const EVP_MD* md); + bool update(const Buffer& buf); + DataPointer digest(); + bool digestInto(Buffer* buf); + + static HMACCtxPointer New(); + + private: + DeleteFnPtr ctx_; +}; + #ifndef OPENSSL_NO_ENGINE class EnginePointer final { public: @@ -1025,12 +1291,17 @@ Buffer ExportChallenge(const char* input, size_t length); // KDF const EVP_MD* getDigestByName(const std::string_view name); +const EVP_CIPHER* getCipherByName(const std::string_view name); // Verify that the specified HKDF output length is valid for the given 
digest. // The maximum length for HKDF output for a given digest is 255 times the // hash size for the given digest algorithm. bool checkHkdfLength(const EVP_MD* md, size_t length); +bool extractP1363(const Buffer& buf, + unsigned char* dest, + size_t n); + DataPointer hkdf(const EVP_MD* md, const Buffer& key, const Buffer& info, diff --git a/src/crypto/crypto_cipher.cc b/src/crypto/crypto_cipher.cc index 61dd1e97d9672a..dca59f16723ef8 100644 --- a/src/crypto/crypto_cipher.cc +++ b/src/crypto/crypto_cipher.cc @@ -20,6 +20,7 @@ using ncrypto::SSLPointer; using v8::Array; using v8::ArrayBuffer; using v8::BackingStore; +using v8::BackingStoreInitializationMode; using v8::Context; using v8::FunctionCallbackInfo; using v8::FunctionTemplate; @@ -244,26 +245,22 @@ void CipherBase::Initialize(Environment* env, Local target) { target, "publicEncrypt", PublicKeyCipher::Cipher); + ncrypto::Cipher::encrypt>); SetMethod(context, target, "privateDecrypt", PublicKeyCipher::Cipher); + ncrypto::Cipher::decrypt>); SetMethod(context, target, "privateEncrypt", PublicKeyCipher::Cipher); + ncrypto::Cipher::sign>); SetMethod(context, target, "publicDecrypt", PublicKeyCipher::Cipher); + ncrypto::Cipher::recover>); SetMethodNoSideEffect(context, target, "getCipherInfo", GetCipherInfo); @@ -288,17 +285,13 @@ void CipherBase::RegisterExternalReferences( registry->Register(GetCiphers); registry->Register(PublicKeyCipher::Cipher); + ncrypto::Cipher::encrypt>); registry->Register(PublicKeyCipher::Cipher); + ncrypto::Cipher::decrypt>); registry->Register(PublicKeyCipher::Cipher); + ncrypto::Cipher::sign>); registry->Register(PublicKeyCipher::Cipher); + ncrypto::Cipher::recover>); registry->Register(GetCipherInfo); } @@ -773,10 +766,10 @@ CipherBase::UpdateResult CipherBase::Update( return kErrorState; } - { - NoArrayBufferZeroFillScope no_zero_fill_scope(env()->isolate_data()); - *out = ArrayBuffer::NewBackingStore(env()->isolate(), buf_len); - } + *out = ArrayBuffer::NewBackingStore( + env()->isolate(), + buf_len, + BackingStoreInitializationMode::kUninitialized); buffer = { .data = reinterpret_cast(data), @@ -853,11 +846,10 @@ bool CipherBase::Final(std::unique_ptr* out) { const int mode = ctx_.getMode(); - { - NoArrayBufferZeroFillScope no_zero_fill_scope(env()->isolate_data()); - *out = ArrayBuffer::NewBackingStore( - env()->isolate(), static_cast(ctx_.getBlockSize())); - } + *out = ArrayBuffer::NewBackingStore( + env()->isolate(), + static_cast(ctx_.getBlockSize()), + BackingStoreInitializationMode::kUninitialized); if (kind_ == kDecipher && Cipher::FromCtx(ctx_).isSupportedAuthenticatedMode()) { @@ -939,9 +931,7 @@ void CipherBase::Final(const FunctionCallbackInfo& args) { Buffer::New(env, ab, 0, ab->ByteLength()).FromMaybe(Local())); } -template +template bool PublicKeyCipher::Cipher( Environment* env, const EVPKeyPointer& pkey, @@ -950,62 +940,32 @@ bool PublicKeyCipher::Cipher( const ArrayBufferOrViewContents& oaep_label, const ArrayBufferOrViewContents& data, std::unique_ptr* out) { - EVPKeyCtxPointer ctx = pkey.newCtx(); - if (!ctx) - return false; - if (EVP_PKEY_cipher_init(ctx.get()) <= 0) - return false; - if (EVP_PKEY_CTX_set_rsa_padding(ctx.get(), padding) <= 0) - return false; - - if (digest != nullptr) { - if (EVP_PKEY_CTX_set_rsa_oaep_md(ctx.get(), digest) <= 0) - return false; - } - - if (!SetRsaOaepLabel(ctx, oaep_label.ToByteSource())) return false; + auto label = oaep_label.ToByteSource(); + auto in = data.ToByteSource(); - size_t out_len = 0; - if (EVP_PKEY_cipher( - ctx.get(), - nullptr, - 
&out_len, - data.data(), - data.size()) <= 0) { - return false; - } - - { - NoArrayBufferZeroFillScope no_zero_fill_scope(env->isolate_data()); - *out = ArrayBuffer::NewBackingStore(env->isolate(), out_len); - } + const ncrypto::Cipher::CipherParams params{ + .padding = padding, + .digest = digest, + .label = label, + }; - if (EVP_PKEY_cipher( - ctx.get(), - static_cast((*out)->Data()), - &out_len, - data.data(), - data.size()) <= 0) { - return false; - } + auto buf = cipher(pkey, params, in); + if (!buf) return false; - CHECK_LE(out_len, (*out)->ByteLength()); - if (out_len == 0) { + if (buf.size() == 0) { *out = ArrayBuffer::NewBackingStore(env->isolate(), 0); - } else if (out_len != (*out)->ByteLength()) { - std::unique_ptr old_out = std::move(*out); - *out = ArrayBuffer::NewBackingStore(env->isolate(), out_len); + } else { + *out = ArrayBuffer::NewBackingStore(env->isolate(), buf.size()); memcpy(static_cast((*out)->Data()), - static_cast(old_out->Data()), - out_len); + static_cast(buf.get()), + buf.size()); } return true; } template + PublicKeyCipher::Cipher_t cipher> void PublicKeyCipher::Cipher(const FunctionCallbackInfo& args) { MarkPopErrorOnReturn mark_pop_error_on_return; Environment* env = Environment::GetCurrent(args); @@ -1024,25 +984,16 @@ void PublicKeyCipher::Cipher(const FunctionCallbackInfo& args) { uint32_t padding; if (!args[offset + 1]->Uint32Value(env->context()).To(&padding)) return; - if (EVP_PKEY_cipher == EVP_PKEY_decrypt && + if (cipher == ncrypto::Cipher::decrypt && operation == PublicKeyCipher::kPrivate && padding == RSA_PKCS1_PADDING) { EVPKeyCtxPointer ctx = pkey.newCtx(); CHECK(ctx); - if (EVP_PKEY_decrypt_init(ctx.get()) <= 0) { + if (!ctx.initForDecrypt()) { return ThrowCryptoError(env, ERR_get_error()); } - int rsa_pkcs1_implicit_rejection = - EVP_PKEY_CTX_ctrl_str(ctx.get(), "rsa_pkcs1_implicit_rejection", "1"); - // From the doc -2 means that the option is not supported. - // The default for the option is enabled and if it has been - // specifically disabled we want to respect that so we will - // not throw an error if the option is supported regardless - // of how it is set. 
The call to set the value - // will not affect what is used since a different context is - // used in the call if the option is supported - if (rsa_pkcs1_implicit_rejection <= 0) { + if (!ctx.setRsaImplicitRejection()) { return THROW_ERR_INVALID_ARG_VALUE( env, "RSA_PKCS1_PADDING is no longer supported for private decryption"); @@ -1052,7 +1003,7 @@ void PublicKeyCipher::Cipher(const FunctionCallbackInfo& args) { const EVP_MD* digest = nullptr; if (args[offset + 2]->IsString()) { const Utf8Value oaep_str(env->isolate(), args[offset + 2]); - digest = EVP_get_digestbyname(*oaep_str); + digest = ncrypto::getDigestByName(oaep_str.ToStringView()); if (digest == nullptr) return THROW_ERR_OSSL_EVP_INVALID_DIGEST(env); } @@ -1063,8 +1014,7 @@ void PublicKeyCipher::Cipher(const FunctionCallbackInfo& args) { return THROW_ERR_OUT_OF_RANGE(env, "oaepLabel is too big"); } std::unique_ptr out; - if (!Cipher( - env, pkey, padding, digest, oaep_label, buf, &out)) { + if (!Cipher(env, pkey, padding, digest, oaep_label, buf, &out)) { return ThrowCryptoError(env, ERR_get_error()); } diff --git a/src/crypto/crypto_cipher.h b/src/crypto/crypto_cipher.h index 57c424e7509fa2..950acfa2521ede 100644 --- a/src/crypto/crypto_cipher.h +++ b/src/crypto/crypto_cipher.h @@ -96,19 +96,17 @@ class CipherBase : public BaseObject { class PublicKeyCipher { public: - typedef int (*EVP_PKEY_cipher_init_t)(EVP_PKEY_CTX* ctx); - typedef int (*EVP_PKEY_cipher_t)(EVP_PKEY_CTX* ctx, - unsigned char* out, size_t* outlen, - const unsigned char* in, size_t inlen); + using Cipher_t = + ncrypto::DataPointer(const ncrypto::EVPKeyPointer&, + const ncrypto::Cipher::CipherParams& params, + const ncrypto::Buffer); enum Operation { kPublic, kPrivate }; - template + template static bool Cipher(Environment* env, const ncrypto::EVPKeyPointer& pkey, int padding, @@ -117,9 +115,7 @@ class PublicKeyCipher { const ArrayBufferOrViewContents& data, std::unique_ptr* out); - template + template static void Cipher(const v8::FunctionCallbackInfo& args); }; diff --git a/src/crypto/crypto_common.cc b/src/crypto/crypto_common.cc index d94f6e1c82c4a6..591509e735b943 100644 --- a/src/crypto/crypto_common.cc +++ b/src/crypto/crypto_common.cc @@ -36,7 +36,7 @@ using ncrypto::StackOfX509; using ncrypto::X509Pointer; using ncrypto::X509View; using v8::ArrayBuffer; -using v8::BackingStore; +using v8::BackingStoreInitializationMode; using v8::Context; using v8::EscapableHandleScope; using v8::Integer; @@ -307,11 +307,8 @@ MaybeLocal ECPointToBuffer(Environment* env, return MaybeLocal(); } - std::unique_ptr bs; - { - NoArrayBufferZeroFillScope no_zero_fill_scope(env->isolate_data()); - bs = ArrayBuffer::NewBackingStore(env->isolate(), len); - } + auto bs = ArrayBuffer::NewBackingStore( + env->isolate(), len, BackingStoreInitializationMode::kUninitialized); len = EC_POINT_point2oct(group, point, diff --git a/src/crypto/crypto_context.cc b/src/crypto/crypto_context.cc index c7574e67f03f03..da5cebb87a3d51 100644 --- a/src/crypto/crypto_context.cc +++ b/src/crypto/crypto_context.cc @@ -1451,7 +1451,7 @@ void SecureContext::GetCertificate(const FunctionCallbackInfo& args) { } // UseExtraCaCerts is called only once at the start of the Node.js process. 
-void UseExtraCaCerts(const std::string& file) { +void UseExtraCaCerts(std::string_view file) { extra_root_certs_file = file; } diff --git a/src/crypto/crypto_dh.cc b/src/crypto/crypto_dh.cc index 7041eb985d9f6d..0e25a937e175fa 100644 --- a/src/crypto/crypto_dh.cc +++ b/src/crypto/crypto_dh.cc @@ -397,31 +397,23 @@ EVPKeyCtxPointer DhKeyGenTraits::Setup(DhKeyPairGenConfig* params) { auto dh = DHPointer::New(std::move(prime), std::move(bn_g)); if (!dh) return {}; - key_params = EVPKeyPointer::New(); - CHECK(key_params); - CHECK_EQ(EVP_PKEY_assign_DH(key_params.get(), dh.release()), 1); + key_params = EVPKeyPointer::NewDH(std::move(dh)); } else if (int* prime_size = std::get_if(¶ms->params.prime)) { - EVPKeyCtxPointer param_ctx(EVP_PKEY_CTX_new_id(EVP_PKEY_DH, nullptr)); - EVP_PKEY* raw_params = nullptr; - if (!param_ctx || - EVP_PKEY_paramgen_init(param_ctx.get()) <= 0 || - EVP_PKEY_CTX_set_dh_paramgen_prime_len( - param_ctx.get(), - *prime_size) <= 0 || - EVP_PKEY_CTX_set_dh_paramgen_generator( - param_ctx.get(), - params->params.generator) <= 0 || - EVP_PKEY_paramgen(param_ctx.get(), &raw_params) <= 0) { + auto param_ctx = EVPKeyCtxPointer::NewFromID(EVP_PKEY_DH); + if (!param_ctx.initForParamgen() || + !param_ctx.setDhParameters(*prime_size, params->params.generator)) { return {}; } - key_params = EVPKeyPointer(raw_params); + key_params = param_ctx.paramgen(); } else { UNREACHABLE(); } + if (!key_params) return {}; + EVPKeyCtxPointer ctx = key_params.newCtx(); - if (!ctx || EVP_PKEY_keygen_init(ctx.get()) <= 0) return {}; + if (!ctx.initForKeygen()) return {}; return ctx; } diff --git a/src/crypto/crypto_dsa.cc b/src/crypto/crypto_dsa.cc index 471fee77531139..cac05aa55f8dd2 100644 --- a/src/crypto/crypto_dsa.cc +++ b/src/crypto/crypto_dsa.cc @@ -12,22 +12,10 @@ #include -// EVP_PKEY_CTX_set_dsa_paramgen_q_bits was added in OpenSSL 1.1.1e. -#if OPENSSL_VERSION_NUMBER < 0x1010105fL -#define EVP_PKEY_CTX_set_dsa_paramgen_q_bits(ctx, qbits) \ - EVP_PKEY_CTX_ctrl((ctx), \ - EVP_PKEY_DSA, \ - EVP_PKEY_OP_PARAMGEN, \ - EVP_PKEY_CTRL_DSA_PARAMGEN_Q_BITS, \ - (qbits), \ - nullptr) -#endif - namespace node { using ncrypto::BignumPointer; using ncrypto::EVPKeyCtxPointer; -using ncrypto::EVPKeyPointer; using v8::FunctionCallbackInfo; using v8::Int32; using v8::JustVoid; @@ -41,33 +29,22 @@ using v8::Value; namespace crypto { EVPKeyCtxPointer DsaKeyGenTraits::Setup(DsaKeyPairGenConfig* params) { - EVPKeyCtxPointer param_ctx(EVP_PKEY_CTX_new_id(EVP_PKEY_DSA, nullptr)); - EVP_PKEY* raw_params = nullptr; - - if (!param_ctx || - EVP_PKEY_paramgen_init(param_ctx.get()) <= 0 || - EVP_PKEY_CTX_set_dsa_paramgen_bits( - param_ctx.get(), - params->params.modulus_bits) <= 0) { - return EVPKeyCtxPointer(); - } - - if (params->params.divisor_bits != -1) { - if (EVP_PKEY_CTX_set_dsa_paramgen_q_bits( - param_ctx.get(), params->params.divisor_bits) <= 0) { - return EVPKeyCtxPointer(); - } + auto param_ctx = EVPKeyCtxPointer::NewFromID(EVP_PKEY_DSA); + + if (!param_ctx.initForParamgen() || + !param_ctx.setDsaParameters( + params->params.modulus_bits, + params->params.divisor_bits != -1 + ? 
std::optional(params->params.divisor_bits) + : std::nullopt)) { + return {}; } - if (EVP_PKEY_paramgen(param_ctx.get(), &raw_params) <= 0) - return EVPKeyCtxPointer(); + auto key_params = param_ctx.paramgen(); + if (!key_params) return {}; - EVPKeyPointer key_params(raw_params); EVPKeyCtxPointer key_ctx = key_params.newCtx(); - - if (!key_ctx || EVP_PKEY_keygen_init(key_ctx.get()) <= 0) - return EVPKeyCtxPointer(); - + if (!key_ctx.initForKeygen()) return {}; return key_ctx; } diff --git a/src/crypto/crypto_ec.cc b/src/crypto/crypto_ec.cc index 5ccda6f0768873..98f1e1312769ca 100644 --- a/src/crypto/crypto_ec.cc +++ b/src/crypto/crypto_ec.cc @@ -28,7 +28,7 @@ using ncrypto::EVPKeyPointer; using ncrypto::MarkPopErrorOnReturn; using v8::Array; using v8::ArrayBuffer; -using v8::BackingStore; +using v8::BackingStoreInitializationMode; using v8::Context; using v8::FunctionCallbackInfo; using v8::FunctionTemplate; @@ -201,14 +201,10 @@ void ECDH::ComputeSecret(const FunctionCallbackInfo& args) { return; } - std::unique_ptr bs; - { - NoArrayBufferZeroFillScope no_zero_fill_scope(env->isolate_data()); - // NOTE: field_size is in bits - int field_size = EC_GROUP_get_degree(ecdh->group_); - size_t out_len = (field_size + 7) / 8; - bs = ArrayBuffer::NewBackingStore(env->isolate(), out_len); - } + int field_size = EC_GROUP_get_degree(ecdh->group_); + size_t out_len = (field_size + 7) / 8; + auto bs = ArrayBuffer::NewBackingStore( + env->isolate(), out_len, BackingStoreInitializationMode::kUninitialized); if (!ECDH_compute_key( bs->Data(), bs->ByteLength(), pub, ecdh->key_.get(), nullptr)) @@ -257,12 +253,11 @@ void ECDH::GetPrivateKey(const FunctionCallbackInfo& args) { return THROW_ERR_CRYPTO_OPERATION_FAILED(env, "Failed to get ECDH private key"); - std::unique_ptr bs; - { - NoArrayBufferZeroFillScope no_zero_fill_scope(env->isolate_data()); - bs = ArrayBuffer::NewBackingStore(env->isolate(), - BignumPointer::GetByteCount(b)); - } + auto bs = ArrayBuffer::NewBackingStore( + env->isolate(), + BignumPointer::GetByteCount(b), + BackingStoreInitializationMode::kUninitialized); + CHECK_EQ(bs->ByteLength(), BignumPointer::EncodePaddedInto( b, static_cast(bs->Data()), bs->ByteLength())); @@ -459,24 +454,14 @@ bool ECDHBitsTraits::DeriveBits(Environment* env, case EVP_PKEY_X25519: // Fall through case EVP_PKEY_X448: { - EVPKeyCtxPointer ctx = m_privkey.newCtx(); Mutex::ScopedLock pub_lock(params.public_.mutex()); - if (EVP_PKEY_derive_init(ctx.get()) <= 0 || - EVP_PKEY_derive_set_peer( - ctx.get(), - m_pubkey.get()) <= 0 || - EVP_PKEY_derive(ctx.get(), nullptr, &len) <= 0) { - return false; - } - - ByteSource::Builder buf(len); - - if (EVP_PKEY_derive(ctx.get(), buf.data(), &len) <= 0) { - return false; - } + EVPKeyCtxPointer ctx = m_privkey.newCtx(); + if (!ctx.initForDerive(m_pubkey)) return false; - *out = std::move(buf).release(len); + auto data = ctx.derive(); + if (!data) return false; + *out = ByteSource::Allocated(data.release()); break; } default: { @@ -523,28 +508,24 @@ EVPKeyCtxPointer EcKeyGenTraits::Setup(EcKeyPairGenConfig* params) { case EVP_PKEY_X25519: // Fall through case EVP_PKEY_X448: - key_ctx.reset(EVP_PKEY_CTX_new_id(params->params.curve_nid, nullptr)); + key_ctx = EVPKeyCtxPointer::NewFromID(params->params.curve_nid); break; default: { - EVPKeyCtxPointer param_ctx(EVP_PKEY_CTX_new_id(EVP_PKEY_EC, nullptr)); - EVP_PKEY* raw_params = nullptr; - if (!param_ctx || - EVP_PKEY_paramgen_init(param_ctx.get()) <= 0 || - EVP_PKEY_CTX_set_ec_paramgen_curve_nid( - param_ctx.get(), 
params->params.curve_nid) <= 0 || - EVP_PKEY_CTX_set_ec_param_enc( - param_ctx.get(), params->params.param_encoding) <= 0 || - EVP_PKEY_paramgen(param_ctx.get(), &raw_params) <= 0) { - return EVPKeyCtxPointer(); + auto param_ctx = EVPKeyCtxPointer::NewFromID(EVP_PKEY_EC); + if (!param_ctx.initForParamgen() || + !param_ctx.setEcParameters(params->params.curve_nid, + params->params.param_encoding)) { + return {}; } - EVPKeyPointer key_params(raw_params); + + auto key_params = param_ctx.paramgen(); + if (!key_params) return {}; + key_ctx = key_params.newCtx(); } } - if (key_ctx && EVP_PKEY_keygen_init(key_ctx.get()) <= 0) - key_ctx.reset(); - + if (!key_ctx.initForKeygen()) return {}; return key_ctx; } diff --git a/src/crypto/crypto_hash.cc b/src/crypto/crypto_hash.cc index bcd4c533b07ceb..851847483327c1 100644 --- a/src/crypto/crypto_hash.cc +++ b/src/crypto/crypto_hash.cc @@ -11,6 +11,7 @@ namespace node { +using ncrypto::DataPointer; using ncrypto::EVPMDCtxPointer; using ncrypto::MarkPopErrorOnReturn; using v8::Context; @@ -59,7 +60,7 @@ struct MaybeCachedMD { }; MaybeCachedMD FetchAndMaybeCacheMD(Environment* env, const char* search_name) { - const EVP_MD* implicit_md = EVP_get_digestbyname(search_name); + const EVP_MD* implicit_md = ncrypto::getDigestByName(search_name); if (!implicit_md) return {nullptr, nullptr, -1}; const char* real_name = EVP_MD_get0_name(implicit_md); @@ -202,7 +203,7 @@ const EVP_MD* GetDigestImplementation(Environment* env, return result.explicit_md ? result.explicit_md : result.implicit_md; #else Utf8Value utf8(env->isolate(), algorithm); - return EVP_get_digestbyname(*utf8); + return ncrypto::getDigestByName(utf8.ToStringView()); #endif } @@ -220,7 +221,7 @@ void Hash::OneShotDigest(const FunctionCallbackInfo& args) { CHECK(args[5]->IsUint32() || args[5]->IsUndefined()); // outputEncodingId const EVP_MD* md = GetDigestImplementation(env, args[0], args[1], args[2]); - if (md == nullptr) { + if (md == nullptr) [[unlikely]] { Utf8Value method(isolate, args[0]); std::string message = "Digest method " + method.ToString() + " is not supported"; @@ -229,41 +230,36 @@ void Hash::OneShotDigest(const FunctionCallbackInfo& args) { enum encoding output_enc = ParseEncoding(isolate, args[4], args[5], HEX); - int md_len = EVP_MD_size(md); - unsigned int result_size; - ByteSource::Builder output(md_len); - int success; - // On smaller inputs, EVP_Digest() can be slower than the - // deprecated helpers e.g SHA256_XXX. The speedup may not - // be worth using deprecated APIs, however, so we use - // EVP_Digest(), unless there's a better alternative - // in the future. 
- // https://github.com/openssl/openssl/issues/19612 - if (args[3]->IsString()) { - Utf8Value utf8(isolate, args[3]); - success = EVP_Digest(utf8.out(), - utf8.length(), - output.data(), - &result_size, - md, - nullptr); - } else { + DataPointer output = ([&] { + if (args[3]->IsString()) { + Utf8Value utf8(isolate, args[3]); + ncrypto::Buffer buf{ + .data = reinterpret_cast(utf8.out()), + .len = utf8.length(), + }; + return ncrypto::hashDigest(buf, md); + } + ArrayBufferViewContents input(args[3]); - success = EVP_Digest(input.data(), - input.length(), - output.data(), - &result_size, - md, - nullptr); - } - if (!success) { + ncrypto::Buffer buf{ + .data = reinterpret_cast(input.data()), + .len = input.length(), + }; + return ncrypto::hashDigest(buf, md); + })(); + + if (!output) [[unlikely]] { return ThrowCryptoError(env, ERR_get_error()); } Local error; - MaybeLocal rc = StringBytes::Encode( - env->isolate(), output.data(), md_len, output_enc, &error); - if (rc.IsEmpty()) { + MaybeLocal rc = + StringBytes::Encode(env->isolate(), + static_cast(output.get()), + output.size(), + output_enc, + &error); + if (rc.IsEmpty()) [[unlikely]] { CHECK(!error.IsEmpty()); env->isolate()->ThrowException(error); return; @@ -314,7 +310,8 @@ void Hash::New(const FunctionCallbackInfo& args) { const EVP_MD* md = nullptr; if (args[0]->IsObject()) { ASSIGN_OR_RETURN_UNWRAP(&orig, args[0].As()); - md = EVP_MD_CTX_md(orig->mdctx_.get()); + CHECK_NOT_NULL(orig); + md = orig->mdctx_.getDigest(); } else { md = GetDigestImplementation(env, args[0], args[2], args[3]); } @@ -331,25 +328,25 @@ void Hash::New(const FunctionCallbackInfo& args) { "Digest method not supported"); } - if (orig != nullptr && - 0 >= EVP_MD_CTX_copy(hash->mdctx_.get(), orig->mdctx_.get())) { + if (orig != nullptr && !orig->mdctx_.copyTo(hash->mdctx_)) { return ThrowCryptoError(env, ERR_get_error(), "Digest copy error"); } } bool Hash::HashInit(const EVP_MD* md, Maybe xof_md_len) { - mdctx_.reset(EVP_MD_CTX_new()); - if (!mdctx_ || EVP_DigestInit_ex(mdctx_.get(), md, nullptr) <= 0) { + mdctx_ = EVPMDCtxPointer::New(); + if (!mdctx_.digestInit(md)) [[unlikely]] { mdctx_.reset(); return false; } - md_len_ = EVP_MD_size(md); + md_len_ = mdctx_.getDigestSize(); if (xof_md_len.IsJust() && xof_md_len.FromJust() != md_len_) { // This is a little hack to cause createHash to fail when an incorrect // hashSize option was passed for a non-XOF hash function. - if ((EVP_MD_flags(md) & EVP_MD_FLAG_XOF) == 0) { + if (!mdctx_.hasXofFlag()) [[unlikely]] { EVPerr(EVP_F_EVP_DIGESTFINALXOF, EVP_R_NOT_XOF_OR_INVALID_LENGTH); + mdctx_.reset(); return false; } md_len_ = xof_md_len.FromJust(); @@ -359,9 +356,11 @@ bool Hash::HashInit(const EVP_MD* md, Maybe xof_md_len) { } bool Hash::HashUpdate(const char* data, size_t len) { - if (!mdctx_) - return false; - return EVP_DigestUpdate(mdctx_.get(), data, len) == 1; + if (!mdctx_) return false; + return mdctx_.digestUpdate(ncrypto::Buffer{ + .data = data, + .len = len, + }); } void Hash::HashUpdate(const FunctionCallbackInfo& args) { @@ -402,31 +401,18 @@ void Hash::HashDigest(const FunctionCallbackInfo& args) { // and Hash.digest can both be used to retrieve the digest, // so we need to cache it. // See https://github.com/nodejs/node/issues/28245. 
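Editorial aside, not part of the patch: the hunk above swaps the manual EVP_DigestFinal_ex / EVP_DigestFinalXOF calls for the EVPMDCtxPointer::digestFinal() helper added earlier in this commit. A minimal sketch of using that wrapper directly, assuming only deps/ncrypto/ncrypto.h and OpenSSL headers; the function name Sha256Of is illustrative and not an API introduced by this change:

    #include "ncrypto.h"
    #include <openssl/evp.h>

    // Hash a buffer with the new EVPMDCtxPointer wrapper; empty result on failure.
    ncrypto::DataPointer Sha256Of(const void* data, size_t len) {
      auto ctx = ncrypto::EVPMDCtxPointer::New();
      if (!ctx.digestInit(EVP_sha256())) return {};
      if (!ctx.digestUpdate(ncrypto::Buffer<const void>{data, len})) return {};
      // Requesting exactly the expected size takes the EVP_DigestFinal_ex path;
      // any other size is only valid for XOF digests such as SHAKE128/256.
      return ctx.digestFinal(ctx.getExpectedSize());
    }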
- - ByteSource::Builder digest(len); - - size_t default_len = EVP_MD_CTX_size(hash->mdctx_.get()); - int ret; - if (len == default_len) { - ret = EVP_DigestFinal_ex( - hash->mdctx_.get(), digest.data(), &len); - // The output length should always equal hash->md_len_ - CHECK_EQ(len, hash->md_len_); - } else { - ret = EVP_DigestFinalXOF( - hash->mdctx_.get(), digest.data(), len); - } - - if (ret != 1) + auto data = hash->mdctx_.digestFinal(len); + if (!data) [[unlikely]] { return ThrowCryptoError(env, ERR_get_error()); + } - hash->digest_ = std::move(digest).release(); + hash->digest_ = ByteSource::Allocated(data.release()); } Local error; MaybeLocal rc = StringBytes::Encode( env->isolate(), hash->digest_.data(), len, encoding, &error); - if (rc.IsEmpty()) { + if (rc.IsEmpty()) [[unlikely]] { CHECK(!error.IsEmpty()); env->isolate()->ThrowException(error); return; @@ -469,7 +455,7 @@ Maybe HashTraits::AdditionalConfig( CHECK(args[offset]->IsString()); // Hash algorithm Utf8Value digest(env->isolate(), args[offset]); - params->digest = EVP_get_digestbyname(*digest); + params->digest = ncrypto::getDigestByName(digest.ToStringView()); if (params->digest == nullptr) [[unlikely]] { THROW_ERR_CRYPTO_INVALID_DIGEST(env, "Invalid digest: %s", *digest); return Nothing(); @@ -492,7 +478,7 @@ Maybe HashTraits::AdditionalConfig( static_cast(args[offset + 2] .As()->Value()) / CHAR_BIT; if (params->length != expected) { - if ((EVP_MD_flags(params->digest) & EVP_MD_FLAG_XOF) == 0) { + if ((EVP_MD_flags(params->digest) & EVP_MD_FLAG_XOF) == 0) [[unlikely]] { THROW_ERR_CRYPTO_INVALID_DIGEST(env, "Digest method not supported"); return Nothing(); } @@ -506,29 +492,19 @@ bool HashTraits::DeriveBits( Environment* env, const HashConfig& params, ByteSource* out) { - EVPMDCtxPointer ctx(EVP_MD_CTX_new()); + auto ctx = EVPMDCtxPointer::New(); - if (!ctx || EVP_DigestInit_ex(ctx.get(), params.digest, nullptr) <= 0 || - EVP_DigestUpdate(ctx.get(), params.in.data(), params.in.size()) <= - 0) [[unlikely]] { + if (!ctx.digestInit(params.digest) || !ctx.digestUpdate(params.in)) + [[unlikely]] { return false; } if (params.length > 0) [[likely]] { - unsigned int length = params.length; - ByteSource::Builder buf(length); - - size_t expected = EVP_MD_CTX_size(ctx.get()); - - int ret = - (length == expected) - ? 
EVP_DigestFinal_ex(ctx.get(), buf.data(), &length) - : EVP_DigestFinalXOF(ctx.get(), buf.data(), length); - - if (ret != 1) [[unlikely]] + auto data = ctx.digestFinal(params.length); + if (!data) [[unlikely]] return false; - *out = std::move(buf).release(); + *out = ByteSource::Allocated(data.release()); } return true; @@ -548,7 +524,7 @@ void InternalVerifyIntegrity(const v8::FunctionCallbackInfo& args) { CHECK(args[2]->IsArrayBufferView()); ArrayBufferOrViewContents expected(args[2]); - const EVP_MD* md_type = EVP_get_digestbyname(*algorithm); + const EVP_MD* md_type = ncrypto::getDigestByName(algorithm.ToStringView()); unsigned char digest[EVP_MAX_MD_SIZE]; unsigned int digest_size; if (md_type == nullptr || EVP_Digest(content.data(), @@ -556,7 +532,7 @@ void InternalVerifyIntegrity(const v8::FunctionCallbackInfo& args) { digest, &digest_size, md_type, - nullptr) != 1) { + nullptr) != 1) [[unlikely]] { return ThrowCryptoError( env, ERR_get_error(), "Digest method not supported"); } @@ -570,7 +546,7 @@ void InternalVerifyIntegrity(const v8::FunctionCallbackInfo& args) { digest_size, BASE64, &error); - if (rc.IsEmpty()) { + if (rc.IsEmpty()) [[unlikely]] { CHECK(!error.IsEmpty()); env->isolate()->ThrowException(error); return; diff --git a/src/crypto/crypto_hkdf.cc b/src/crypto/crypto_hkdf.cc index 2a465c849def44..10bb8e4258bf63 100644 --- a/src/crypto/crypto_hkdf.cc +++ b/src/crypto/crypto_hkdf.cc @@ -55,7 +55,7 @@ Maybe HKDFTraits::AdditionalConfig( Utf8Value hash(env->isolate(), args[offset]); params->digest = ncrypto::getDigestByName(hash.ToStringView()); - if (params->digest == nullptr) { + if (params->digest == nullptr) [[unlikely]] { THROW_ERR_CRYPTO_INVALID_DIGEST(env, "Invalid digest: %s", *hash); return Nothing(); } @@ -88,7 +88,7 @@ Maybe HKDFTraits::AdditionalConfig( // HKDF-Expand computes up to 255 HMAC blocks, each having as many bits as the // output of the hash function. 255 is a hard limit because HKDF appends an // 8-bit counter to each HMAC'd message, starting at 1. 
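Editorial aside, not part of the patch: the comment above caps HKDF output at 255 times the digest size, so for SHA-256 (32-byte output) the largest valid length is 255 * 32 = 8160 bytes. A minimal illustration against the checkHkdfLength helper this commit adds to deps/ncrypto:

    #include "ncrypto.h"
    #include <openssl/evp.h>
    #include <cassert>

    void HkdfLengthLimitExample() {
      const EVP_MD* md = EVP_sha256();                      // 32-byte digest
      assert(ncrypto::checkHkdfLength(md, 255 * 32));       // 8160 is allowed
      assert(!ncrypto::checkHkdfLength(md, 255 * 32 + 1));  // 8161 is rejected
    }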
- if (!ncrypto::checkHkdfLength(params->digest, params->length)) { + if (!ncrypto::checkHkdfLength(params->digest, params->length)) [[unlikely]] { THROW_ERR_CRYPTO_INVALID_KEYLEN(env); return Nothing(); } diff --git a/src/crypto/crypto_hmac.cc b/src/crypto/crypto_hmac.cc index 25ccb1b9d04e51..56a09e1a2d9b0f 100644 --- a/src/crypto/crypto_hmac.cc +++ b/src/crypto/crypto_hmac.cc @@ -70,15 +70,21 @@ void Hmac::New(const FunctionCallbackInfo& args) { void Hmac::HmacInit(const char* hash_type, const char* key, int key_len) { HandleScope scope(env()->isolate()); - const EVP_MD* md = EVP_get_digestbyname(hash_type); - if (md == nullptr) + const EVP_MD* md = ncrypto::getDigestByName(hash_type); + if (md == nullptr) [[unlikely]] { return THROW_ERR_CRYPTO_INVALID_DIGEST( env(), "Invalid digest: %s", hash_type); + } if (key_len == 0) { key = ""; } - ctx_.reset(HMAC_CTX_new()); - if (!ctx_ || !HMAC_Init_ex(ctx_.get(), key, key_len, md, nullptr)) { + + ctx_ = HMACCtxPointer::New(); + ncrypto::Buffer key_buf{ + .data = key, + .len = static_cast(key_len), + }; + if (!ctx_.init(key_buf, md)) [[unlikely]] { ctx_.reset(); return ThrowCryptoError(env(), ERR_get_error()); } @@ -95,9 +101,11 @@ void Hmac::HmacInit(const FunctionCallbackInfo& args) { } bool Hmac::HmacUpdate(const char* data, size_t len) { - return ctx_ && HMAC_Update(ctx_.get(), - reinterpret_cast(data), - len) == 1; + ncrypto::Buffer buf{ + .data = data, + .len = len, + }; + return ctx_.update(buf); } void Hmac::HmacUpdate(const FunctionCallbackInfo& args) { @@ -123,24 +131,27 @@ void Hmac::HmacDigest(const FunctionCallbackInfo& args) { } unsigned char md_value[EVP_MAX_MD_SIZE]; - unsigned int md_len = 0; + ncrypto::Buffer buf{ + .data = md_value, + .len = sizeof(md_value), + }; if (hmac->ctx_) { - bool ok = HMAC_Final(hmac->ctx_.get(), md_value, &md_len); - hmac->ctx_.reset(); - if (!ok) { + if (!hmac->ctx_.digestInto(&buf)) [[unlikely]] { + hmac->ctx_.reset(); return ThrowCryptoError(env, ERR_get_error(), "Failed to finalize HMAC"); } + hmac->ctx_.reset(); } Local error; MaybeLocal rc = StringBytes::Encode(env->isolate(), reinterpret_cast(md_value), - md_len, + buf.len, encoding, &error); - if (rc.IsEmpty()) { + if (rc.IsEmpty()) [[unlikely]] { CHECK(!error.IsEmpty()); env->isolate()->ThrowException(error); return; @@ -188,8 +199,8 @@ Maybe HmacTraits::AdditionalConfig( CHECK(args[offset + 2]->IsObject()); // Key Utf8Value digest(env->isolate(), args[offset + 1]); - params->digest = EVP_get_digestbyname(*digest); - if (params->digest == nullptr) { + params->digest = ncrypto::getDigestByName(digest.ToStringView()); + if (params->digest == nullptr) [[unlikely]] { THROW_ERR_CRYPTO_INVALID_DIGEST(env, "Invalid digest: %s", *digest); return Nothing(); } @@ -225,31 +236,29 @@ bool HmacTraits::DeriveBits( Environment* env, const HmacConfig& params, ByteSource* out) { - HMACCtxPointer ctx(HMAC_CTX_new()); + auto ctx = HMACCtxPointer::New(); - if (!ctx || !HMAC_Init_ex(ctx.get(), - params.key.GetSymmetricKey(), - params.key.GetSymmetricKeySize(), - params.digest, - nullptr)) { + ncrypto::Buffer key_buf{ + .data = params.key.GetSymmetricKey(), + .len = params.key.GetSymmetricKeySize(), + }; + if (!ctx.init(key_buf, params.digest)) [[unlikely]] { return false; } - if (!HMAC_Update( - ctx.get(), - params.data.data(), - params.data.size())) { + ncrypto::Buffer buffer{ + .data = params.data.data(), + .len = params.data.size(), + }; + if (!ctx.update(buffer)) [[unlikely]] { return false; } - ByteSource::Builder buf(EVP_MAX_MD_SIZE); - unsigned int len; - - 
if (!HMAC_Final(ctx.get(), buf.data(), &len)) { + auto buf = ctx.digest(); + if (!buf) [[unlikely]] return false; - } - *out = std::move(buf).release(len); + *out = ByteSource::Allocated(buf.release()); return true; } @@ -258,9 +267,9 @@ MaybeLocal HmacTraits::EncodeOutput(Environment* env, const HmacConfig& params, ByteSource* out) { switch (params.mode) { - case SignConfiguration::kSign: + case SignConfiguration::Mode::Sign: return out->ToArrayBuffer(env); - case SignConfiguration::kVerify: + case SignConfiguration::Mode::Verify: return Boolean::New( env->isolate(), out->size() > 0 && out->size() == params.signature.size() && diff --git a/src/crypto/crypto_keygen.cc b/src/crypto/crypto_keygen.cc index 246191f5d51796..7c3a85e9f8a24d 100644 --- a/src/crypto/crypto_keygen.cc +++ b/src/crypto/crypto_keygen.cc @@ -47,10 +47,8 @@ Maybe NidKeyPairGenTraits::AdditionalConfig( } EVPKeyCtxPointer NidKeyPairGenTraits::Setup(NidKeyPairGenConfig* params) { - EVPKeyCtxPointer ctx = - EVPKeyCtxPointer(EVP_PKEY_CTX_new_id(params->params.id, nullptr)); - if (!ctx || EVP_PKEY_keygen_init(ctx.get()) <= 0) return {}; - + auto ctx = EVPKeyCtxPointer::NewFromID(params->params.id); + if (!ctx || !ctx.initForKeygen()) return {}; return ctx; } diff --git a/src/crypto/crypto_keys.cc b/src/crypto/crypto_keys.cc index bedcf04d036478..2c55828facc35b 100644 --- a/src/crypto/crypto_keys.cc +++ b/src/crypto/crypto_keys.cc @@ -349,7 +349,7 @@ KeyObjectData::GetPrivateKeyEncodingFromJs( if (context != kKeyContextInput) { if (args[*offset]->IsString()) { Utf8Value cipher_name(env->isolate(), args[*offset]); - config.cipher = EVP_get_cipherbyname(*cipher_name); + config.cipher = ncrypto::getCipherByName(cipher_name.ToStringView()); if (config.cipher == nullptr) { THROW_ERR_CRYPTO_UNKNOWN_CIPHER(env); return Nothing(); @@ -597,7 +597,7 @@ bool KeyObjectHandle::HasInstance(Environment* env, Local value) { return !t.IsEmpty() && t->HasInstance(value); } -v8::Local KeyObjectHandle::Initialize(Environment* env) { +Local KeyObjectHandle::Initialize(Environment* env) { Local templ = env->crypto_key_object_handle_constructor(); if (templ.IsEmpty()) { Isolate* isolate = env->isolate(); @@ -958,14 +958,10 @@ bool KeyObjectHandle::CheckEcKeyData() const { CHECK_EQ(key.id(), EVP_PKEY_EC); if (data_.GetKeyType() == kKeyTypePrivate) { - return EVP_PKEY_check(ctx.get()) == 1; + return ctx.privateCheck(); } -#if OPENSSL_VERSION_MAJOR >= 3 - return EVP_PKEY_public_check_quick(ctx.get()) == 1; -#else - return EVP_PKEY_public_check(ctx.get()) == 1; -#endif + return ctx.publicCheck(); } void KeyObjectHandle::CheckEcKeyData(const FunctionCallbackInfo& args) { @@ -1202,6 +1198,9 @@ void Initialize(Environment* env, Local target) { constexpr int kKeyFormatJWK = static_cast(EVPKeyPointer::PKFormatType::JWK); + constexpr auto kSigEncDER = DSASigEnc::DER; + constexpr auto kSigEncP1363 = DSASigEnc::P1363; + NODE_DEFINE_CONSTANT(target, kWebCryptoKeyFormatRaw); NODE_DEFINE_CONSTANT(target, kWebCryptoKeyFormatPKCS8); NODE_DEFINE_CONSTANT(target, kWebCryptoKeyFormatSPKI); diff --git a/src/crypto/crypto_pbkdf2.cc b/src/crypto/crypto_pbkdf2.cc index dcaa430aacd3d7..1a0dff8238d938 100644 --- a/src/crypto/crypto_pbkdf2.cc +++ b/src/crypto/crypto_pbkdf2.cc @@ -88,20 +88,20 @@ Maybe PBKDF2Traits::AdditionalConfig( CHECK(args[offset + 4]->IsString()); // digest_name params->iterations = args[offset + 2].As()->Value(); - if (params->iterations < 0) { + if (params->iterations < 0) [[unlikely]] { THROW_ERR_OUT_OF_RANGE(env, "iterations must be <= %d", INT_MAX); 
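
The hunks above (and many later in this series) tag validation-failure branches with the C++20 [[unlikely]] attribute. A minimal standalone illustration of the semantics, independent of any Node.js or OpenSSL API: the attribute is purely an optimizer hint about which branch is expected and does not change behavior.

    #include <cstdio>

    // Illustrative only: hint that the error branch is rarely taken so the
    // compiler can keep the hot path contiguous.
    int checked_length(int value) {
      if (value < 0) [[unlikely]] {
        std::fprintf(stderr, "length must be non-negative\n");
        return -1;
      }
      return value;
    }
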
return Nothing(); } params->length = args[offset + 3].As()->Value(); - if (params->length < 0) { + if (params->length < 0) [[unlikely]] { THROW_ERR_OUT_OF_RANGE(env, "length must be <= %d", INT_MAX); return Nothing(); } Utf8Value name(args.GetIsolate(), args[offset + 4]); params->digest = ncrypto::getDigestByName(name.ToStringView()); - if (params->digest == nullptr) { + if (params->digest == nullptr) [[unlikely]] { THROW_ERR_CRYPTO_INVALID_DIGEST(env, "Invalid digest: %s", *name); return Nothing(); } diff --git a/src/crypto/crypto_random.cc b/src/crypto/crypto_random.cc index cb96698aa644c3..4e9ff906c06571 100644 --- a/src/crypto/crypto_random.cc +++ b/src/crypto/crypto_random.cc @@ -14,7 +14,6 @@ namespace node { using ncrypto::BignumPointer; using ncrypto::ClearErrorOnReturn; using v8::ArrayBuffer; -using v8::BackingStore; using v8::Boolean; using v8::FunctionCallbackInfo; using v8::Int32; @@ -25,6 +24,7 @@ using v8::MaybeLocal; using v8::Nothing; using v8::Object; using v8::Uint32; +using v8::Undefined; using v8::Value; namespace crypto { @@ -39,7 +39,7 @@ BignumPointer::PrimeCheckCallback getPrimeCheckCallback(Environment* env) { } // namespace MaybeLocal RandomBytesTraits::EncodeOutput( Environment* env, const RandomBytesConfig& params, ByteSource* unused) { - return v8::Undefined(env->isolate()); + return Undefined(env->isolate()); } Maybe RandomBytesTraits::AdditionalConfig( @@ -78,14 +78,13 @@ void RandomPrimeConfig::MemoryInfo(MemoryTracker* tracker) const { MaybeLocal RandomPrimeTraits::EncodeOutput( Environment* env, const RandomPrimeConfig& params, ByteSource* unused) { size_t size = params.prime.byteLength(); - std::shared_ptr store = - ArrayBuffer::NewBackingStore(env->isolate(), size); + auto store = ArrayBuffer::NewBackingStore(env->isolate(), size); CHECK_EQ(size, BignumPointer::EncodePaddedInto( params.prime.get(), reinterpret_cast(store->Data()), size)); - return ArrayBuffer::New(env->isolate(), store); + return ArrayBuffer::New(env->isolate(), std::move(store)); } Maybe RandomPrimeTraits::AdditionalConfig( @@ -104,7 +103,7 @@ Maybe RandomPrimeTraits::AdditionalConfig( if (!args[offset + 2]->IsUndefined()) { ArrayBufferOrViewContents add(args[offset + 2]); params->add.reset(add.data(), add.size()); - if (!params->add) { + if (!params->add) [[unlikely]] { THROW_ERR_CRYPTO_OPERATION_FAILED(env, "could not generate prime"); return Nothing(); } @@ -113,7 +112,7 @@ Maybe RandomPrimeTraits::AdditionalConfig( if (!args[offset + 3]->IsUndefined()) { ArrayBufferOrViewContents rem(args[offset + 3]); params->rem.reset(rem.data(), rem.size()); - if (!params->rem) { + if (!params->rem) [[unlikely]] { THROW_ERR_CRYPTO_OPERATION_FAILED(env, "could not generate prime"); return Nothing(); } @@ -124,7 +123,7 @@ Maybe RandomPrimeTraits::AdditionalConfig( CHECK_GT(bits, 0); if (params->add) { - if (BignumPointer::GetBitCount(params->add.get()) > bits) { + if (BignumPointer::GetBitCount(params->add.get()) > bits) [[unlikely]] { // If we allowed this, the best case would be returning a static prime // that wasn't generated randomly. The worst case would be an infinite // loop within OpenSSL, blocking the main thread or one of the threads @@ -133,7 +132,7 @@ Maybe RandomPrimeTraits::AdditionalConfig( return Nothing(); } - if (params->rem && params->add <= params->rem) { + if (params->rem && params->add <= params->rem) [[unlikely]] { // This would definitely lead to an infinite loop if allowed since // OpenSSL does not check this condition. 
THROW_ERR_OUT_OF_RANGE(env, "invalid options.rem"); @@ -144,7 +143,7 @@ Maybe RandomPrimeTraits::AdditionalConfig( params->bits = bits; params->safe = safe; params->prime = BignumPointer::NewSecure(); - if (!params->prime) { + if (!params->prime) [[unlikely]] { THROW_ERR_CRYPTO_OPERATION_FAILED(env, "could not generate prime"); return Nothing(); } @@ -195,7 +194,8 @@ bool CheckPrimeTraits::DeriveBits( const CheckPrimeConfig& params, ByteSource* out) { int ret = params.candidate.isPrime(params.checks, getPrimeCheckCallback(env)); - if (ret < 0) return false; + if (ret < 0) [[unlikely]] + return false; ByteSource::Builder buf(1); buf.data()[0] = ret; *out = std::move(buf).release(); diff --git a/src/crypto/crypto_rsa.cc b/src/crypto/crypto_rsa.cc index 05a3882c7e17d7..f0e0f9fd5f94a1 100644 --- a/src/crypto/crypto_rsa.cc +++ b/src/crypto/crypto_rsa.cc @@ -15,13 +15,15 @@ namespace node { using ncrypto::BignumPointer; +using ncrypto::DataPointer; using ncrypto::EVPKeyCtxPointer; using ncrypto::EVPKeyPointer; using ncrypto::RSAPointer; using v8::ArrayBuffer; -using v8::BackingStore; +using v8::BackingStoreInitializationMode; using v8::FunctionCallbackInfo; using v8::Int32; +using v8::Integer; using v8::JustVoid; using v8::Local; using v8::Maybe; @@ -34,37 +36,28 @@ using v8::Value; namespace crypto { EVPKeyCtxPointer RsaKeyGenTraits::Setup(RsaKeyPairGenConfig* params) { - EVPKeyCtxPointer ctx( - EVP_PKEY_CTX_new_id( - params->params.variant == kKeyVariantRSA_PSS - ? EVP_PKEY_RSA_PSS - : EVP_PKEY_RSA, - nullptr)); - - if (EVP_PKEY_keygen_init(ctx.get()) <= 0) - return EVPKeyCtxPointer(); - - if (EVP_PKEY_CTX_set_rsa_keygen_bits( - ctx.get(), - params->params.modulus_bits) <= 0) { - return EVPKeyCtxPointer(); + auto ctx = EVPKeyCtxPointer::NewFromID( + params->params.variant == kKeyVariantRSA_PSS ? EVP_PKEY_RSA_PSS + : EVP_PKEY_RSA); + + if (!ctx.initForKeygen() || + !ctx.setRsaKeygenBits(params->params.modulus_bits)) { + return {}; } // 0x10001 is the default RSA exponent. - if (params->params.exponent != 0x10001) { + if (params->params.exponent != EVPKeyCtxPointer::kDefaultRsaExponent) { auto bn = BignumPointer::New(); - CHECK(bn.setWord(params->params.exponent)); - // EVP_CTX accepts ownership of bn on success. 
- if (EVP_PKEY_CTX_set_rsa_keygen_pubexp(ctx.get(), bn.get()) <= 0) - return EVPKeyCtxPointer(); - - bn.release(); + if (!bn.setWord(params->params.exponent) || + !ctx.setRsaKeygenPubExp(std::move(bn))) { + return {}; + } } if (params->params.variant == kKeyVariantRSA_PSS) { if (params->params.md != nullptr && - EVP_PKEY_CTX_set_rsa_pss_keygen_md(ctx.get(), params->params.md) <= 0) { - return EVPKeyCtxPointer(); + !ctx.setRsaPssKeygenMd(params->params.md)) { + return {}; } // TODO(tniessen): This appears to only be necessary in OpenSSL 3, while @@ -76,11 +69,8 @@ EVPKeyCtxPointer RsaKeyGenTraits::Setup(RsaKeyPairGenConfig* params) { mgf1_md = params->params.md; } - if (mgf1_md != nullptr && - EVP_PKEY_CTX_set_rsa_pss_keygen_mgf1_md( - ctx.get(), - mgf1_md) <= 0) { - return EVPKeyCtxPointer(); + if (mgf1_md != nullptr && !ctx.setRsaPssKeygenMgf1Md(mgf1_md)) { + return {}; } int saltlen = params->params.saltlen; @@ -88,11 +78,8 @@ EVPKeyCtxPointer RsaKeyGenTraits::Setup(RsaKeyPairGenConfig* params) { saltlen = EVP_MD_size(params->params.md); } - if (saltlen >= 0 && - EVP_PKEY_CTX_set_rsa_pss_keygen_saltlen( - ctx.get(), - saltlen) <= 0) { - return EVPKeyCtxPointer(); + if (saltlen >= 0 && !ctx.setRsaPssSaltlen(saltlen)) { + return {}; } } @@ -154,7 +141,7 @@ Maybe RsaKeyGenTraits::AdditionalConfig( if (!args[*offset]->IsUndefined()) { CHECK(args[*offset]->IsString()); Utf8Value digest(env->isolate(), args[*offset]); - params->params.md = EVP_get_digestbyname(*digest); + params->params.md = ncrypto::getDigestByName(digest.ToStringView()); if (params->params.md == nullptr) { THROW_ERR_CRYPTO_INVALID_DIGEST(env, "Invalid digest: %s", *digest); return Nothing(); @@ -164,7 +151,7 @@ Maybe RsaKeyGenTraits::AdditionalConfig( if (!args[*offset + 1]->IsUndefined()) { CHECK(args[*offset + 1]->IsString()); Utf8Value digest(env->isolate(), args[*offset + 1]); - params->params.mgf1_md = EVP_get_digestbyname(*digest); + params->params.mgf1_md = ncrypto::getDigestByName(digest.ToStringView()); if (params->params.mgf1_md == nullptr) { THROW_ERR_CRYPTO_INVALID_DIGEST( env, "Invalid MGF1 digest: %s", *digest); @@ -196,8 +183,11 @@ WebCryptoKeyExportStatus RSA_JWK_Export(const KeyObjectData& key_data, return WebCryptoKeyExportStatus::FAILED; } -template +using Cipher_t = DataPointer(const EVPKeyPointer& key, + const ncrypto::Rsa::CipherParams& params, + const ncrypto::Buffer in); + +template WebCryptoCipherStatus RSA_Cipher(Environment* env, const KeyObjectData& key_data, const RSACipherConfig& params, @@ -206,45 +196,16 @@ WebCryptoCipherStatus RSA_Cipher(Environment* env, CHECK_NE(key_data.GetKeyType(), kKeyTypeSecret); Mutex::ScopedLock lock(key_data.mutex()); const auto& m_pkey = key_data.GetAsymmetricKey(); + const ncrypto::Rsa::CipherParams nparams{ + .padding = params.padding, + .digest = params.digest, + .label = params.label, + }; - EVPKeyCtxPointer ctx = m_pkey.newCtx(); - - if (!ctx || init(ctx.get()) <= 0) - return WebCryptoCipherStatus::FAILED; - - if (EVP_PKEY_CTX_set_rsa_padding(ctx.get(), params.padding) <= 0) { - return WebCryptoCipherStatus::FAILED; - } - - if (params.digest != nullptr && - (EVP_PKEY_CTX_set_rsa_oaep_md(ctx.get(), params.digest) <= 0 || - EVP_PKEY_CTX_set_rsa_mgf1_md(ctx.get(), params.digest) <= 0)) { - return WebCryptoCipherStatus::FAILED; - } - - if (!SetRsaOaepLabel(ctx, params.label)) return WebCryptoCipherStatus::FAILED; - - size_t out_len = 0; - if (cipher( - ctx.get(), - nullptr, - &out_len, - in.data(), - in.size()) <= 0) { - return WebCryptoCipherStatus::FAILED; - } 
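
The RSA_Cipher helper in this hunk is now parameterized over the encrypt/decrypt primitive through the Cipher_t alias instead of configuring an EVP_PKEY_CTX inline. A generic sketch of that idiom, with the operation passed as a non-type template parameter (the names below are illustrative, not the ncrypto API):

    #include <string>

    // Function type describing the operation being injected.
    using Op = std::string(const std::string& input);

    std::string encrypt_stub(const std::string& in) { return "enc:" + in; }
    std::string decrypt_stub(const std::string& in) { return "dec:" + in; }

    // The shared wrapper body is instantiated once per operation; the call
    // site selects the primitive at compile time.
    template <Op* op>
    std::string run(const std::string& in) {
      return op(in);
    }

    // Usage: run<encrypt_stub>("payload") or run<decrypt_stub>("payload"),
    // mirroring how DoCipher picks the RSA primitive for RSA_Cipher.
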
- - ByteSource::Builder buf(out_len); - - if (cipher(ctx.get(), - buf.data(), - &out_len, - in.data(), - in.size()) <= 0) { - return WebCryptoCipherStatus::FAILED; - } + auto data = cipher(m_pkey, nparams, in); + if (!data) return WebCryptoCipherStatus::FAILED; - *out = std::move(buf).release(out_len); + *out = ByteSource::Allocated(data.release()); return WebCryptoCipherStatus::OK; } } // namespace @@ -316,7 +277,7 @@ Maybe RSACipherTraits::AdditionalConfig( CHECK(args[offset + 1]->IsString()); // digest Utf8Value digest(env->isolate(), args[offset + 1]); - params->digest = EVP_get_digestbyname(*digest); + params->digest = ncrypto::getDigestByName(digest.ToStringView()); if (params->digest == nullptr) { THROW_ERR_CRYPTO_INVALID_DIGEST(env, "Invalid digest: %s", *digest); return Nothing(); @@ -349,12 +310,10 @@ WebCryptoCipherStatus RSACipherTraits::DoCipher(Environment* env, switch (cipher_mode) { case kWebCryptoCipherEncrypt: CHECK_EQ(key_data.GetKeyType(), kKeyTypePublic); - return RSA_Cipher( - env, key_data, params, in, out); + return RSA_Cipher(env, key_data, params, in, out); case kWebCryptoCipherDecrypt: CHECK_EQ(key_data.GetKeyType(), kKeyTypePrivate); - return RSA_Cipher( - env, key_data, params, in, out); + return RSA_Cipher(env, key_data, params, in, out); } return WebCryptoCipherStatus::FAILED; } @@ -364,50 +323,37 @@ Maybe ExportJWKRsaKey(Environment* env, Local target) { Mutex::ScopedLock lock(key.mutex()); const auto& m_pkey = key.GetAsymmetricKey(); - int type = m_pkey.id(); - CHECK(type == EVP_PKEY_RSA || type == EVP_PKEY_RSA_PSS); - // TODO(tniessen): Remove the "else" branch once we drop support for OpenSSL - // versions older than 1.1.1e via FIPS / dynamic linking. - const RSA* rsa; - if (OpenSSL_version_num() >= 0x1010105fL) { - rsa = EVP_PKEY_get0_RSA(m_pkey.get()); - } else { - rsa = static_cast(EVP_PKEY_get0(m_pkey.get())); - } - CHECK_NOT_NULL(rsa); - - const BIGNUM* n; - const BIGNUM* e; - const BIGNUM* d; - const BIGNUM* p; - const BIGNUM* q; - const BIGNUM* dp; - const BIGNUM* dq; - const BIGNUM* qi; - RSA_get0_key(rsa, &n, &e, &d); - - if (target->Set( - env->context(), - env->jwk_kty_string(), - env->jwk_rsa_string()).IsNothing()) { + const ncrypto::Rsa rsa = m_pkey; + if (!rsa || + target->Set(env->context(), env->jwk_kty_string(), env->jwk_rsa_string()) + .IsNothing()) { return Nothing(); } - if (SetEncodedValue(env, target, env->jwk_n_string(), n).IsNothing() || - SetEncodedValue(env, target, env->jwk_e_string(), e).IsNothing()) { + auto pub_key = rsa.getPublicKey(); + + if (SetEncodedValue(env, target, env->jwk_n_string(), pub_key.n) + .IsNothing() || + SetEncodedValue(env, target, env->jwk_e_string(), pub_key.e) + .IsNothing()) { return Nothing(); } if (key.GetKeyType() == kKeyTypePrivate) { - RSA_get0_factors(rsa, &p, &q); - RSA_get0_crt_params(rsa, &dp, &dq, &qi); - if (SetEncodedValue(env, target, env->jwk_d_string(), d).IsNothing() || - SetEncodedValue(env, target, env->jwk_p_string(), p).IsNothing() || - SetEncodedValue(env, target, env->jwk_q_string(), q).IsNothing() || - SetEncodedValue(env, target, env->jwk_dp_string(), dp).IsNothing() || - SetEncodedValue(env, target, env->jwk_dq_string(), dq).IsNothing() || - SetEncodedValue(env, target, env->jwk_qi_string(), qi).IsNothing()) { + auto pvt_key = rsa.getPrivateKey(); + if (SetEncodedValue(env, target, env->jwk_d_string(), pub_key.d) + .IsNothing() || + SetEncodedValue(env, target, env->jwk_p_string(), pvt_key.p) + .IsNothing() || + SetEncodedValue(env, target, env->jwk_q_string(), pvt_key.q) + 
.IsNothing() || + SetEncodedValue(env, target, env->jwk_dp_string(), pvt_key.dp) + .IsNothing() || + SetEncodedValue(env, target, env->jwk_dq_string(), pvt_key.dq) + .IsNothing() || + SetEncodedValue(env, target, env->jwk_qi_string(), pvt_key.qi) + .IsNothing()) { return Nothing(); } } @@ -440,15 +386,12 @@ KeyObjectData ImportJWKRsaKey(Environment* env, KeyType type = d_value->IsString() ? kKeyTypePrivate : kKeyTypePublic; RSAPointer rsa(RSA_new()); + ncrypto::Rsa rsa_view(rsa.get()); ByteSource n = ByteSource::FromEncodedString(env, n_value.As()); ByteSource e = ByteSource::FromEncodedString(env, e_value.As()); - if (!RSA_set0_key( - rsa.get(), - n.ToBN().release(), - e.ToBN().release(), - nullptr)) { + if (!rsa_view.setPublicKey(n.ToBN(), e.ToBN())) { THROW_ERR_CRYPTO_INVALID_JWK(env, "Invalid JWK RSA key"); return {}; } @@ -485,20 +428,15 @@ KeyObjectData ImportJWKRsaKey(Environment* env, ByteSource dq = ByteSource::FromEncodedString(env, dq_value.As()); ByteSource qi = ByteSource::FromEncodedString(env, qi_value.As()); - if (!RSA_set0_key(rsa.get(), nullptr, nullptr, d.ToBN().release()) || - !RSA_set0_factors(rsa.get(), p.ToBN().release(), q.ToBN().release()) || - !RSA_set0_crt_params( - rsa.get(), - dp.ToBN().release(), - dq.ToBN().release(), - qi.ToBN().release())) { + if (!rsa_view.setPrivateKey( + d.ToBN(), q.ToBN(), p.ToBN(), dp.ToBN(), dq.ToBN(), qi.ToBN())) { THROW_ERR_CRYPTO_INVALID_JWK(env, "Invalid JWK RSA key"); return {}; } } - auto pkey = EVPKeyPointer::New(); - CHECK_EQ(EVP_PKEY_set1_RSA(pkey.get(), rsa.get()), 1); + auto pkey = EVPKeyPointer::NewRSA(std::move(rsa)); + if (!pkey) return {}; return KeyObjectData::CreateAsymmetric(type, std::move(pkey)); } @@ -506,43 +444,32 @@ KeyObjectData ImportJWKRsaKey(Environment* env, Maybe GetRsaKeyDetail(Environment* env, const KeyObjectData& key, Local target) { - const BIGNUM* e; // Public Exponent - const BIGNUM* n; // Modulus - Mutex::ScopedLock lock(key.mutex()); const auto& m_pkey = key.GetAsymmetricKey(); - int type = m_pkey.id(); - CHECK(type == EVP_PKEY_RSA || type == EVP_PKEY_RSA_PSS); // TODO(tniessen): Remove the "else" branch once we drop support for OpenSSL // versions older than 1.1.1e via FIPS / dynamic linking. 
- const RSA* rsa; - if (OpenSSL_version_num() >= 0x1010105fL) { - rsa = EVP_PKEY_get0_RSA(m_pkey.get()); - } else { - rsa = static_cast(EVP_PKEY_get0(m_pkey.get())); - } - CHECK_NOT_NULL(rsa); + const ncrypto::Rsa rsa = m_pkey; + if (!rsa) return Nothing(); - RSA_get0_key(rsa, &n, &e, nullptr); + auto pub_key = rsa.getPublicKey(); if (target ->Set(env->context(), env->modulus_length_string(), - Number::New(env->isolate(), - static_cast(BignumPointer::GetBitCount(n)))) + Number::New( + env->isolate(), + static_cast(BignumPointer::GetBitCount(pub_key.n)))) .IsNothing()) { return Nothing(); } - std::unique_ptr public_exponent; - { - NoArrayBufferZeroFillScope no_zero_fill_scope(env->isolate_data()); - public_exponent = ArrayBuffer::NewBackingStore( - env->isolate(), BignumPointer::GetByteCount(e)); - } + auto public_exponent = ArrayBuffer::NewBackingStore( + env->isolate(), + BignumPointer::GetByteCount(pub_key.e), + BackingStoreInitializationMode::kUninitialized); CHECK_EQ(BignumPointer::EncodePaddedInto( - e, + pub_key.e, static_cast(public_exponent->Data()), public_exponent->ByteLength()), public_exponent->ByteLength()); @@ -555,7 +482,7 @@ Maybe GetRsaKeyDetail(Environment* env, return Nothing(); } - if (type == EVP_PKEY_RSA_PSS) { + if (m_pkey.id() == EVP_PKEY_RSA_PSS) { // Due to the way ASN.1 encoding works, default values are omitted when // encoding the data structure. However, there are also RSA-PSS keys for // which no parameters are set. In that case, the ASN.1 RSASSA-PSS-params @@ -565,64 +492,34 @@ Maybe GetRsaKeyDetail(Environment* env, // In that case, RSA_get0_pss_params does not return nullptr but all fields // of the returned RSA_PSS_PARAMS will be set to nullptr. - const RSA_PSS_PARAMS* params = RSA_get0_pss_params(rsa); - if (params != nullptr) { - int hash_nid = NID_sha1; - int mgf_nid = NID_mgf1; - int mgf1_hash_nid = NID_sha1; - int64_t salt_length = 20; - - if (params->hashAlgorithm != nullptr) { - const ASN1_OBJECT* hash_obj; - X509_ALGOR_get0(&hash_obj, nullptr, nullptr, params->hashAlgorithm); - hash_nid = OBJ_obj2nid(hash_obj); - } - + auto maybe_params = rsa.getPssParams(); + if (maybe_params.has_value()) { + auto& params = maybe_params.value(); if (target - ->Set( - env->context(), - env->hash_algorithm_string(), - OneByteString(env->isolate(), OBJ_nid2ln(hash_nid))) + ->Set(env->context(), + env->hash_algorithm_string(), + OneByteString(env->isolate(), params.digest)) .IsNothing()) { return Nothing(); } - if (params->maskGenAlgorithm != nullptr) { - const ASN1_OBJECT* mgf_obj; - X509_ALGOR_get0(&mgf_obj, nullptr, nullptr, params->maskGenAlgorithm); - mgf_nid = OBJ_obj2nid(mgf_obj); - if (mgf_nid == NID_mgf1) { - const ASN1_OBJECT* mgf1_hash_obj; - X509_ALGOR_get0(&mgf1_hash_obj, nullptr, nullptr, params->maskHash); - mgf1_hash_nid = OBJ_obj2nid(mgf1_hash_obj); - } - } - // If, for some reason, the MGF is not MGF1, then the MGF1 hash function // is intentionally not added to the object. 
- if (mgf_nid == NID_mgf1) { + if (params.mgf1_digest.has_value()) { + auto digest = params.mgf1_digest.value(); if (target - ->Set( - env->context(), - env->mgf1_hash_algorithm_string(), - OneByteString(env->isolate(), OBJ_nid2ln(mgf1_hash_nid))) + ->Set(env->context(), + env->mgf1_hash_algorithm_string(), + OneByteString(env->isolate(), digest)) .IsNothing()) { return Nothing(); } } - if (params->saltLength != nullptr) { - if (ASN1_INTEGER_get_int64(&salt_length, params->saltLength) != 1) { - ThrowCryptoError(env, ERR_get_error(), "ASN1_INTEGER_get_in64 error"); - return Nothing(); - } - } - if (target - ->Set( - env->context(), - env->salt_length_string(), - Number::New(env->isolate(), static_cast(salt_length))) + ->Set(env->context(), + env->salt_length_string(), + Integer::New(env->isolate(), params.salt_length)) .IsNothing()) { return Nothing(); } diff --git a/src/crypto/crypto_sig.cc b/src/crypto/crypto_sig.cc index abb8a804c1b508..175a8e92ef437f 100644 --- a/src/crypto/crypto_sig.cc +++ b/src/crypto/crypto_sig.cc @@ -14,20 +14,20 @@ namespace node { using ncrypto::BignumPointer; using ncrypto::ClearErrorOnReturn; +using ncrypto::DataPointer; using ncrypto::ECDSASigPointer; -using ncrypto::ECKeyPointer; using ncrypto::EVPKeyCtxPointer; using ncrypto::EVPKeyPointer; using ncrypto::EVPMDCtxPointer; using v8::ArrayBuffer; using v8::BackingStore; +using v8::BackingStoreInitializationMode; using v8::Boolean; using v8::FunctionCallbackInfo; using v8::FunctionTemplate; using v8::HandleScope; using v8::Int32; using v8::Isolate; -using v8::Just; using v8::JustVoid; using v8::Local; using v8::Maybe; @@ -39,44 +39,40 @@ using v8::Value; namespace crypto { namespace { -bool ValidateDSAParameters(EVP_PKEY* key) { - /* Validate DSA2 parameters from FIPS 186-4 */ - auto id = EVPKeyPointer::base_id(key); -#if OPENSSL_VERSION_MAJOR >= 3 - if (EVP_default_properties_is_fips_enabled(nullptr) && EVP_PKEY_DSA == id) { -#else - if (FIPS_mode() && EVP_PKEY_DSA == id) { -#endif - const DSA* dsa = EVP_PKEY_get0_DSA(key); - const BIGNUM* p; - const BIGNUM* q; - DSA_get0_pqg(dsa, &p, &q, nullptr); - int L = BignumPointer::GetBitCount(p); - int N = BignumPointer::GetBitCount(q); - - return (L == 1024 && N == 160) || - (L == 2048 && N == 224) || - (L == 2048 && N == 256) || - (L == 3072 && N == 256); +int GetPaddingFromJS(const EVPKeyPointer& key, Local val) { + int padding = key.getDefaultSignPadding(); + if (!val->IsUndefined()) [[likely]] { + CHECK(val->IsInt32()); + padding = val.As()->Value(); } + return padding; +} - return true; +std::optional GetSaltLenFromJS(Local val) { + std::optional salt_len; + if (!val->IsUndefined()) [[likely]] { + CHECK(val->IsInt32()); + salt_len = val.As()->Value(); + } + return salt_len; +} + +DSASigEnc GetDSASigEncFromJS(Local val) { + CHECK(val->IsInt32()); + int i = val.As()->Value(); + if (i < 0 || i >= static_cast(DSASigEnc::Invalid)) [[unlikely]] { + return DSASigEnc::Invalid; + } + return static_cast(val.As()->Value()); } bool ApplyRSAOptions(const EVPKeyPointer& pkey, EVP_PKEY_CTX* pkctx, int padding, - const Maybe& salt_len) { - int id = pkey.id(); - if (id == EVP_PKEY_RSA || id == EVP_PKEY_RSA2 || id == EVP_PKEY_RSA_PSS) { - if (EVP_PKEY_CTX_set_rsa_padding(pkctx, padding) <= 0) - return false; - if (padding == RSA_PKCS1_PSS_PADDING && salt_len.IsJust()) { - if (EVP_PKEY_CTX_set_rsa_pss_saltlen(pkctx, salt_len.FromJust()) <= 0) - return false; - } + std::optional salt_len) { + if (pkey.isRsaVariant()) { + return EVPKeyCtxPointer::setRsaPadding(pkctx, padding, 
salt_len); } - return true; } @@ -84,38 +80,31 @@ std::unique_ptr Node_SignFinal(Environment* env, EVPMDCtxPointer&& mdctx, const EVPKeyPointer& pkey, int padding, - Maybe pss_salt_len) { - unsigned char m[EVP_MAX_MD_SIZE]; - unsigned int m_len; - - if (!EVP_DigestFinal_ex(mdctx.get(), m, &m_len)) + std::optional pss_salt_len) { + auto data = mdctx.digestFinal(mdctx.getExpectedSize()); + if (!data) [[unlikely]] return nullptr; - size_t sig_len = pkey.size(); - std::unique_ptr sig; - { - NoArrayBufferZeroFillScope no_zero_fill_scope(env->isolate_data()); - sig = ArrayBuffer::NewBackingStore(env->isolate(), sig_len); - } + auto sig = ArrayBuffer::NewBackingStore(env->isolate(), pkey.size()); + ncrypto::Buffer sig_buf{ + .data = static_cast(sig->Data()), + .len = pkey.size(), + }; + EVPKeyCtxPointer pkctx = pkey.newCtx(); - if (pkctx && EVP_PKEY_sign_init(pkctx.get()) > 0 && + if (pkctx.initForSign() > 0 && ApplyRSAOptions(pkey, pkctx.get(), padding, pss_salt_len) && - EVP_PKEY_CTX_set_signature_md(pkctx.get(), EVP_MD_CTX_md(mdctx.get())) > - 0 && - EVP_PKEY_sign(pkctx.get(), - static_cast(sig->Data()), - &sig_len, - m, - m_len) > 0) { - CHECK_LE(sig_len, sig->ByteLength()); - if (sig_len == 0) { - sig = ArrayBuffer::NewBackingStore(env->isolate(), 0); - } else if (sig_len != sig->ByteLength()) { - std::unique_ptr old_sig = std::move(sig); - sig = ArrayBuffer::NewBackingStore(env->isolate(), sig_len); - memcpy(static_cast(sig->Data()), - static_cast(old_sig->Data()), - sig_len); + pkctx.setSignatureMd(mdctx) && pkctx.signInto(data, &sig_buf)) + [[likely]] { + CHECK_LE(sig_buf.len, sig->ByteLength()); + if (sig_buf.len < sig->ByteLength()) { + auto new_sig = ArrayBuffer::NewBackingStore(env->isolate(), sig_buf.len); + if (sig_buf.len > 0) [[likely]] { + memcpy(static_cast(new_sig->Data()), + static_cast(sig->Data()), + sig_buf.len); + } + sig = std::move(new_sig); } return sig; } @@ -123,62 +112,26 @@ std::unique_ptr Node_SignFinal(Environment* env, return nullptr; } -int GetDefaultSignPadding(const EVPKeyPointer& m_pkey) { - return m_pkey.id() == EVP_PKEY_RSA_PSS ? RSA_PKCS1_PSS_PADDING - : RSA_PKCS1_PADDING; -} - -unsigned int GetBytesOfRS(const EVPKeyPointer& pkey) { - int bits, base_id = pkey.base_id(); - - if (base_id == EVP_PKEY_DSA) { - const DSA* dsa_key = EVP_PKEY_get0_DSA(pkey.get()); - // Both r and s are computed mod q, so their width is limited by that of q. - bits = BignumPointer::GetBitCount(DSA_get0_q(dsa_key)); - } else if (base_id == EVP_PKEY_EC) { - bits = EC_GROUP_order_bits(ECKeyPointer::GetGroup(pkey)); - } else { - return kNoDsaSignature; - } - - return (bits + 7) / 8; -} - -bool ExtractP1363( - const unsigned char* sig_data, - unsigned char* out, - size_t len, - size_t n) { - ncrypto::Buffer sig_buffer{ - .data = sig_data, - .len = len, - }; - auto asn1_sig = ECDSASigPointer::Parse(sig_buffer); - if (!asn1_sig) - return false; - - return BignumPointer::EncodePaddedInto(asn1_sig.r(), out, n) > 0 && - BignumPointer::EncodePaddedInto(asn1_sig.s(), out + n, n) > 0; -} - // Returns the maximum size of each of the integers (r, s) of the DSA signature. 
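
The removed ExtractP1363 helper, and the ncrypto::extractP1363 call that replaces it below, convert OpenSSL's DER-encoded ECDSA-Sig-Value into the fixed-width IEEE P1363 layout used by WebCrypto. A self-contained sketch of that layout with no OpenSSL types (assumes r and s already fit in n bytes, where n is the byte length of the group order):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // P1363 ("raw") signature: r and s are each left-padded with zeros to
    // n bytes and concatenated, so the result is always exactly 2*n bytes.
    std::vector<uint8_t> to_p1363(const std::vector<uint8_t>& r,
                                  const std::vector<uint8_t>& s,
                                  size_t n) {
      std::vector<uint8_t> out(2 * n, 0);
      std::copy(r.begin(), r.end(), out.begin() + (n - r.size()));      // r, right-aligned
      std::copy(s.begin(), s.end(), out.begin() + n + (n - s.size()));  // s, right-aligned
      return out;
    }
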
std::unique_ptr ConvertSignatureToP1363( Environment* env, const EVPKeyPointer& pkey, std::unique_ptr&& signature) { - unsigned int n = GetBytesOfRS(pkey); - if (n == kNoDsaSignature) - return std::move(signature); + uint32_t n = pkey.getBytesOfRS().value_or(kNoDsaSignature); + if (n == kNoDsaSignature) return std::move(signature); - std::unique_ptr buf; - { - NoArrayBufferZeroFillScope no_zero_fill_scope(env->isolate_data()); - buf = ArrayBuffer::NewBackingStore(env->isolate(), 2 * n); - } - if (!ExtractP1363(static_cast(signature->Data()), - static_cast(buf->Data()), - signature->ByteLength(), n)) + auto buf = ArrayBuffer::NewBackingStore( + env->isolate(), 2 * n, BackingStoreInitializationMode::kUninitialized); + + ncrypto::Buffer sig_buffer{ + .data = static_cast(signature->Data()), + .len = signature->ByteLength(), + }; + + if (!ncrypto::extractP1363( + sig_buffer, static_cast(buf->Data()), n)) { return std::move(signature); + } return buf; } @@ -187,30 +140,33 @@ std::unique_ptr ConvertSignatureToP1363( ByteSource ConvertSignatureToP1363(Environment* env, const EVPKeyPointer& pkey, const ByteSource& signature) { - unsigned int n = GetBytesOfRS(pkey); - if (n == kNoDsaSignature) - return ByteSource(); + unsigned int n = pkey.getBytesOfRS().value_or(kNoDsaSignature); + if (n == kNoDsaSignature) [[unlikely]] + return {}; - const unsigned char* sig_data = signature.data(); + auto data = DataPointer::Alloc(n * 2); + if (!data) [[unlikely]] + return {}; + unsigned char* out = static_cast(data.get()); - ByteSource::Builder out(n * 2); - memset(out.data(), 0, n * 2); + // Extracting the signature may not actually use all of the allocated space. + // We need to ensure that the buffer is zeroed out before use. + data.zero(); - if (!ExtractP1363(sig_data, out.data(), signature.size(), n)) - return ByteSource(); + if (!ncrypto::extractP1363(signature, out, n)) [[unlikely]] { + return {}; + } - return std::move(out).release(); + return ByteSource::Allocated(data.release()); } ByteSource ConvertSignatureToDER(const EVPKeyPointer& pkey, ByteSource&& out) { - unsigned int n = GetBytesOfRS(pkey); - if (n == kNoDsaSignature) - return std::move(out); + unsigned int n = pkey.getBytesOfRS().value_or(kNoDsaSignature); + if (n == kNoDsaSignature) return std::move(out); const unsigned char* sig_data = out.data(); - if (out.size() != 2 * n) - return ByteSource(); + if (out.size() != 2 * n) return {}; auto asn1_sig = ECDSASigPointer::New(); CHECK(asn1_sig); @@ -221,7 +177,8 @@ ByteSource ConvertSignatureToDER(const EVPKeyPointer& pkey, ByteSource&& out) { CHECK(asn1_sig.setParams(std::move(r), std::move(s))); auto buf = asn1_sig.encode(); - if (buf.len <= 0) return ByteSource(); + if (buf.len <= 0) [[unlikely]] + return {}; CHECK_NOT_NULL(buf.data); return ByteSource::Allocated(buf); @@ -231,95 +188,87 @@ void CheckThrow(Environment* env, SignBase::Error error) { HandleScope scope(env->isolate()); switch (error) { - case SignBase::Error::kSignUnknownDigest: + case SignBase::Error::UnknownDigest: return THROW_ERR_CRYPTO_INVALID_DIGEST(env); - case SignBase::Error::kSignNotInitialised: + case SignBase::Error::NotInitialised: return THROW_ERR_CRYPTO_INVALID_STATE(env, "Not initialised"); - case SignBase::Error::kSignMalformedSignature: + case SignBase::Error::MalformedSignature: return THROW_ERR_CRYPTO_OPERATION_FAILED(env, "Malformed signature"); - case SignBase::Error::kSignInit: - case SignBase::Error::kSignUpdate: - case SignBase::Error::kSignPrivateKey: - case SignBase::Error::kSignPublicKey: - { - unsigned 
long err = ERR_get_error(); // NOLINT(runtime/int) - if (err) - return ThrowCryptoError(env, err); - switch (error) { - case SignBase::Error::kSignInit: - return THROW_ERR_CRYPTO_OPERATION_FAILED(env, - "EVP_SignInit_ex failed"); - case SignBase::Error::kSignUpdate: - return THROW_ERR_CRYPTO_OPERATION_FAILED(env, - "EVP_SignUpdate failed"); - case SignBase::Error::kSignPrivateKey: - return THROW_ERR_CRYPTO_OPERATION_FAILED(env, - "PEM_read_bio_PrivateKey failed"); - case SignBase::Error::kSignPublicKey: - return THROW_ERR_CRYPTO_OPERATION_FAILED(env, - "PEM_read_bio_PUBKEY failed"); - default: - ABORT(); - } + case SignBase::Error::Init: + case SignBase::Error::Update: + case SignBase::Error::PrivateKey: + case SignBase::Error::PublicKey: { + unsigned long err = ERR_get_error(); // NOLINT(runtime/int) + if (err) return ThrowCryptoError(env, err); + switch (error) { + case SignBase::Error::Init: + return THROW_ERR_CRYPTO_OPERATION_FAILED(env, + "EVP_SignInit_ex failed"); + case SignBase::Error::Update: + return THROW_ERR_CRYPTO_OPERATION_FAILED(env, + "EVP_SignUpdate failed"); + case SignBase::Error::PrivateKey: + return THROW_ERR_CRYPTO_OPERATION_FAILED( + env, "PEM_read_bio_PrivateKey failed"); + case SignBase::Error::PublicKey: + return THROW_ERR_CRYPTO_OPERATION_FAILED( + env, "PEM_read_bio_PUBKEY failed"); + default: + ABORT(); } + } - case SignBase::Error::kSignOk: + case SignBase::Error::Ok: return; } } -bool IsOneShot(const EVPKeyPointer& key) { - return key.id() == EVP_PKEY_ED25519 || key.id() == EVP_PKEY_ED448; -} - -bool UseP1363Encoding(const EVPKeyPointer& key, const DSASigEnc& dsa_encoding) { - return (key.id() == EVP_PKEY_EC || key.id() == EVP_PKEY_DSA) && - dsa_encoding == kSigEncP1363; +bool UseP1363Encoding(const EVPKeyPointer& key, const DSASigEnc dsa_encoding) { + return key.isSigVariant() && dsa_encoding == DSASigEnc::P1363; } } // namespace -SignBase::Error SignBase::Init(const char* sign_type) { +SignBase::Error SignBase::Init(std::string_view digest) { CHECK_NULL(mdctx_); - // Historically, "dss1" and "DSS1" were DSA aliases for SHA-1 - // exposed through the public API. - if (strcmp(sign_type, "dss1") == 0 || - strcmp(sign_type, "DSS1") == 0) { - sign_type = "SHA1"; - } - const EVP_MD* md = EVP_get_digestbyname(sign_type); - if (md == nullptr) - return kSignUnknownDigest; - - mdctx_.reset(EVP_MD_CTX_new()); - if (!mdctx_ || !EVP_DigestInit_ex(mdctx_.get(), md, nullptr)) { + auto md = ncrypto::getDigestByName(digest); + if (md == nullptr) [[unlikely]] + return Error::UnknownDigest; + + mdctx_ = EVPMDCtxPointer::New(); + + if (!mdctx_.digestInit(md)) [[unlikely]] { mdctx_.reset(); - return kSignInit; + return Error::Init; } - return kSignOk; + return Error::Ok; } SignBase::Error SignBase::Update(const char* data, size_t len) { - if (mdctx_ == nullptr) - return kSignNotInitialised; - if (!EVP_DigestUpdate(mdctx_.get(), data, len)) - return kSignUpdate; - return kSignOk; + if (mdctx_ == nullptr) [[unlikely]] + return Error::NotInitialised; + + ncrypto::Buffer buf{ + .data = data, + .len = len, + }; + + return mdctx_.digestUpdate(buf) ? Error::Ok : Error::Update; } SignBase::SignBase(Environment* env, Local wrap) - : BaseObject(env, wrap) {} + : BaseObject(env, wrap) { + MakeWeak(); +} void SignBase::MemoryInfo(MemoryTracker* tracker) const { tracker->TrackFieldWithSize("mdctx", mdctx_ ? 
kSizeOf_EVP_MD_CTX : 0); } -Sign::Sign(Environment* env, Local wrap) : SignBase(env, wrap) { - MakeWeak(); -} +Sign::Sign(Environment* env, Local wrap) : SignBase(env, wrap) {} void Sign::Initialize(Environment* env, Local target) { Isolate* isolate = env->isolate(); @@ -335,8 +284,13 @@ void Sign::Initialize(Environment* env, Local target) { SignJob::Initialize(env, target); - constexpr int kSignJobModeSign = SignConfiguration::kSign; - constexpr int kSignJobModeVerify = SignConfiguration::kVerify; + constexpr int kSignJobModeSign = + static_cast(SignConfiguration::Mode::Sign); + constexpr int kSignJobModeVerify = + static_cast(SignConfiguration::Mode::Verify); + + constexpr auto kSigEncDER = DSASigEnc::DER; + constexpr auto kSigEncP1363 = DSASigEnc::P1363; NODE_DEFINE_CONSTANT(target, kSignJobModeSign); NODE_DEFINE_CONSTANT(target, kSignJobModeVerify); @@ -363,8 +317,8 @@ void Sign::SignInit(const FunctionCallbackInfo& args) { Sign* sign; ASSIGN_OR_RETURN_UNWRAP(&sign, args.This()); - const node::Utf8Value sign_type(args.GetIsolate(), args[0]); - crypto::CheckThrow(env, sign->Init(*sign_type)); + const node::Utf8Value sign_type(env->isolate(), args[0]); + crypto::CheckThrow(env, sign->Init(sign_type.ToStringView())); } void Sign::SignUpdate(const FunctionCallbackInfo& args) { @@ -380,20 +334,22 @@ void Sign::SignUpdate(const FunctionCallbackInfo& args) { Sign::SignResult Sign::SignFinal(const EVPKeyPointer& pkey, int padding, - const Maybe& salt_len, + std::optional salt_len, DSASigEnc dsa_sig_enc) { - if (!mdctx_) - return SignResult(kSignNotInitialised); + if (!mdctx_) [[unlikely]] { + return SignResult(Error::NotInitialised); + } EVPMDCtxPointer mdctx = std::move(mdctx_); - if (!ValidateDSAParameters(pkey.get())) - return SignResult(kSignPrivateKey); + if (!pkey.validateDsaParameters()) { + return SignResult(Error::PrivateKey); + } - std::unique_ptr buffer = + auto buffer = Node_SignFinal(env(), std::move(mdctx), pkey, padding, salt_len); - Error error = buffer ? kSignOk : kSignPrivateKey; - if (error == kSignOk && dsa_sig_enc == kSigEncP1363) { + Error error = buffer ? 
Error::Ok : Error::PrivateKey; + if (error == Error::Ok && dsa_sig_enc == DSASigEnc::P1363) { buffer = ConvertSignatureToP1363(env(), pkey, std::move(buffer)); CHECK_NOT_NULL(buffer->Data()); } @@ -412,49 +368,34 @@ void Sign::SignFinal(const FunctionCallbackInfo& args) { if (!data) [[unlikely]] return; const auto& key = data.GetAsymmetricKey(); - if (!key) + if (!key) [[unlikely]] return; - if (IsOneShot(key)) { + if (key.isOneShotVariant()) [[unlikely]] { THROW_ERR_CRYPTO_UNSUPPORTED_OPERATION(env); return; } - int padding = GetDefaultSignPadding(key); - if (!args[offset]->IsUndefined()) { - CHECK(args[offset]->IsInt32()); - padding = args[offset].As()->Value(); - } - - Maybe salt_len = Nothing(); - if (!args[offset + 1]->IsUndefined()) { - CHECK(args[offset + 1]->IsInt32()); - salt_len = Just(args[offset + 1].As()->Value()); + int padding = GetPaddingFromJS(key, args[offset]); + std::optional salt_len = GetSaltLenFromJS(args[offset + 1]); + DSASigEnc dsa_sig_enc = GetDSASigEncFromJS(args[offset + 2]); + if (dsa_sig_enc == DSASigEnc::Invalid) [[unlikely]] { + THROW_ERR_OUT_OF_RANGE(env, "invalid signature encoding"); + return; } - CHECK(args[offset + 2]->IsInt32()); - DSASigEnc dsa_sig_enc = - static_cast(args[offset + 2].As()->Value()); + SignResult ret = sign->SignFinal(key, padding, salt_len, dsa_sig_enc); - SignResult ret = sign->SignFinal( - key, - padding, - salt_len, - dsa_sig_enc); - - if (ret.error != kSignOk) + if (ret.error != Error::Ok) [[unlikely]] { return crypto::CheckThrow(env, ret.error); + } - Local ab = - ArrayBuffer::New(env->isolate(), std::move(ret.signature)); + auto ab = ArrayBuffer::New(env->isolate(), std::move(ret.signature)); args.GetReturnValue().Set( Buffer::New(env, ab, 0, ab->ByteLength()).FromMaybe(Local())); } -Verify::Verify(Environment* env, Local wrap) - : SignBase(env, wrap) { - MakeWeak(); -} +Verify::Verify(Environment* env, Local wrap) : SignBase(env, wrap) {} void Verify::Initialize(Environment* env, Local target) { Isolate* isolate = env->isolate(); @@ -486,8 +427,8 @@ void Verify::VerifyInit(const FunctionCallbackInfo& args) { Verify* verify; ASSIGN_OR_RETURN_UNWRAP(&verify, args.This()); - const node::Utf8Value verify_type(args.GetIsolate(), args[0]); - crypto::CheckThrow(env, verify->Init(*verify_type)); + const node::Utf8Value verify_type(env->isolate(), args[0]); + crypto::CheckThrow(env, verify->Init(verify_type.ToStringView())); } void Verify::VerifyUpdate(const FunctionCallbackInfo& args) { @@ -495,8 +436,9 @@ void Verify::VerifyUpdate(const FunctionCallbackInfo& args) { const FunctionCallbackInfo& args, const char* data, size_t size) { Environment* env = Environment::GetCurrent(args); - if (size > INT_MAX) [[unlikely]] + if (size > INT_MAX) [[unlikely]] { return THROW_ERR_OUT_OF_RANGE(env, "data is too long"); + } Error err = verify->Update(data, size); crypto::CheckThrow(verify->env(), err); }); @@ -505,35 +447,30 @@ void Verify::VerifyUpdate(const FunctionCallbackInfo& args) { SignBase::Error Verify::VerifyFinal(const EVPKeyPointer& pkey, const ByteSource& sig, int padding, - const Maybe& saltlen, + std::optional saltlen, bool* verify_result) { - if (!mdctx_) - return kSignNotInitialised; + if (!mdctx_) [[unlikely]] + return Error::NotInitialised; - unsigned char m[EVP_MAX_MD_SIZE]; - unsigned int m_len; *verify_result = false; EVPMDCtxPointer mdctx = std::move(mdctx_); - if (!EVP_DigestFinal_ex(mdctx.get(), m, &m_len)) - return kSignPublicKey; + auto data = mdctx.digestFinal(mdctx.getExpectedSize()); + if (!data) [[unlikely]] + 
return Error::PublicKey; EVPKeyCtxPointer pkctx = pkey.newCtx(); - if (pkctx) { - const int init_ret = EVP_PKEY_verify_init(pkctx.get()); - if (init_ret == -2) { - return kSignPublicKey; - } + if (pkctx) [[likely]] { + const int init_ret = pkctx.initForVerify(); + if (init_ret == -2) [[unlikely]] + return Error::PublicKey; if (init_ret > 0 && ApplyRSAOptions(pkey, pkctx.get(), padding, saltlen) && - EVP_PKEY_CTX_set_signature_md(pkctx.get(), EVP_MD_CTX_md(mdctx.get())) > - 0) { - const unsigned char* s = sig.data(); - const int r = EVP_PKEY_verify(pkctx.get(), s, sig.size(), m, m_len); - *verify_result = r == 1; + pkctx.setSignatureMd(mdctx)) { + *verify_result = pkctx.verify(sig, data); } } - return kSignOk; + return Error::Ok; } void Verify::VerifyFinal(const FunctionCallbackInfo& args) { @@ -545,47 +482,42 @@ void Verify::VerifyFinal(const FunctionCallbackInfo& args) { unsigned int offset = 0; auto data = KeyObjectData::GetPublicOrPrivateKeyFromJs(args, &offset); - if (!data) return; - const auto& pkey = data.GetAsymmetricKey(); - if (!pkey) + if (!data) [[unlikely]] + return; + const auto& key = data.GetAsymmetricKey(); + if (!key) [[unlikely]] return; - if (IsOneShot(pkey)) { + if (key.isOneShotVariant()) [[unlikely]] { THROW_ERR_CRYPTO_UNSUPPORTED_OPERATION(env); return; } ArrayBufferOrViewContents hbuf(args[offset]); - if (!hbuf.CheckSizeInt32()) [[unlikely]] + if (!hbuf.CheckSizeInt32()) [[unlikely]] { return THROW_ERR_OUT_OF_RANGE(env, "buffer is too big"); - - int padding = GetDefaultSignPadding(pkey); - if (!args[offset + 1]->IsUndefined()) { - CHECK(args[offset + 1]->IsInt32()); - padding = args[offset + 1].As()->Value(); } - Maybe salt_len = Nothing(); - if (!args[offset + 2]->IsUndefined()) { - CHECK(args[offset + 2]->IsInt32()); - salt_len = Just(args[offset + 2].As()->Value()); + int padding = GetPaddingFromJS(key, args[offset + 1]); + std::optional salt_len = GetSaltLenFromJS(args[offset + 2]); + DSASigEnc dsa_sig_enc = GetDSASigEncFromJS(args[offset + 3]); + if (dsa_sig_enc == DSASigEnc::Invalid) [[unlikely]] { + THROW_ERR_OUT_OF_RANGE(env, "invalid signature encoding"); + return; } - CHECK(args[offset + 3]->IsInt32()); - DSASigEnc dsa_sig_enc = - static_cast(args[offset + 3].As()->Value()); - ByteSource signature = hbuf.ToByteSource(); - if (dsa_sig_enc == kSigEncP1363) { - signature = ConvertSignatureToDER(pkey, hbuf.ToByteSource()); - if (signature.data() == nullptr) - return crypto::CheckThrow(env, Error::kSignMalformedSignature); + if (dsa_sig_enc == DSASigEnc::P1363) { + signature = ConvertSignatureToDER(key, hbuf.ToByteSource()); + if (signature.data() == nullptr) [[unlikely]] { + return crypto::CheckThrow(env, Error::MalformedSignature); + } } bool verify_result; - Error err = verify->VerifyFinal(pkey, signature, padding, - salt_len, &verify_result); - if (err != kSignOk) + Error err = + verify->VerifyFinal(key, signature, padding, salt_len, &verify_result); + if (err != Error::Ok) [[unlikely]] return crypto::CheckThrow(env, err); args.GetReturnValue().Set(verify_result); } @@ -633,7 +565,7 @@ Maybe SignTraits::AdditionalConfig( static_cast(args[offset].As()->Value()); unsigned int keyParamOffset = offset + 1; - if (params->mode == SignConfiguration::kVerify) { + if (params->mode == SignConfiguration::Mode::Verify) { auto data = KeyObjectData::GetPublicOrPrivateKeyFromJs(args, &keyParamOffset); if (!data) return Nothing(); @@ -655,8 +587,8 @@ Maybe SignTraits::AdditionalConfig( if (args[offset + 6]->IsString()) { Utf8Value digest(env->isolate(), args[offset + 6]); 
- params->digest = EVP_get_digestbyname(*digest); - if (params->digest == nullptr) { + params->digest = ncrypto::getDigestByName(digest.ToStringView()); + if (params->digest == nullptr) [[unlikely]] { THROW_ERR_CRYPTO_INVALID_DIGEST(env, "Invalid digest: %s", *digest); return Nothing(); } @@ -664,24 +596,24 @@ Maybe SignTraits::AdditionalConfig( if (args[offset + 7]->IsInt32()) { // Salt length params->flags |= SignConfiguration::kHasSaltLength; - params->salt_length = args[offset + 7].As()->Value(); + params->salt_length = + GetSaltLenFromJS(args[offset + 7]).value_or(params->salt_length); } if (args[offset + 8]->IsUint32()) { // Padding params->flags |= SignConfiguration::kHasPadding; - params->padding = args[offset + 8].As()->Value(); + params->padding = + GetPaddingFromJS(params->key.GetAsymmetricKey(), args[offset + 8]); } if (args[offset + 9]->IsUint32()) { // DSA Encoding - params->dsa_encoding = - static_cast(args[offset + 9].As()->Value()); - if (params->dsa_encoding != kSigEncDER && - params->dsa_encoding != kSigEncP1363) { + params->dsa_encoding = GetDSASigEncFromJS(args[offset + 9]); + if (params->dsa_encoding == DSASigEnc::Invalid) [[unlikely]] { THROW_ERR_OUT_OF_RANGE(env, "invalid signature encoding"); return Nothing(); } } - if (params->mode == SignConfiguration::kVerify) { + if (params->mode == SignConfiguration::Mode::Verify) { ArrayBufferOrViewContents signature(args[offset + 10]); if (!signature.CheckSizeInt32()) [[unlikely]] { THROW_ERR_OUT_OF_RANGE(env, "signature is too big"); @@ -708,97 +640,69 @@ bool SignTraits::DeriveBits( const SignConfiguration& params, ByteSource* out) { ClearErrorOnReturn clear_error_on_return; - EVPMDCtxPointer context(EVP_MD_CTX_new()); - EVP_PKEY_CTX* ctx = nullptr; - + auto context = EVPMDCtxPointer::New(); + if (!context) [[unlikely]] + return false; const auto& key = params.key.GetAsymmetricKey(); - switch (params.mode) { - case SignConfiguration::kSign: - if (!EVP_DigestSignInit( - context.get(), &ctx, params.digest, nullptr, key.get())) { - crypto::CheckThrow(env, SignBase::Error::kSignInit); - return false; - } - break; - case SignConfiguration::kVerify: - if (!EVP_DigestVerifyInit( - context.get(), &ctx, params.digest, nullptr, key.get())) { - crypto::CheckThrow(env, SignBase::Error::kSignInit); - return false; - } - break; + auto ctx = ([&] { + switch (params.mode) { + case SignConfiguration::Mode::Sign: + return context.signInit(key, params.digest); + case SignConfiguration::Mode::Verify: + return context.verifyInit(key, params.digest); + } + UNREACHABLE(); + })(); + + if (!ctx.has_value()) [[unlikely]] { + crypto::CheckThrow(env, SignBase::Error::Init); + return false; } int padding = params.flags & SignConfiguration::kHasPadding ? params.padding - : GetDefaultSignPadding(key); + : key.getDefaultSignPadding(); - Maybe salt_length = params.flags & SignConfiguration::kHasSaltLength - ? Just(params.salt_length) : Nothing(); + std::optional salt_length = + params.flags & SignConfiguration::kHasSaltLength + ? 
std::optional(params.salt_length) + : std::nullopt; - if (!ApplyRSAOptions(key, ctx, padding, salt_length)) { - crypto::CheckThrow(env, SignBase::Error::kSignPrivateKey); + if (!ApplyRSAOptions(key, *ctx, padding, salt_length)) { + crypto::CheckThrow(env, SignBase::Error::PrivateKey); return false; } switch (params.mode) { - case SignConfiguration::kSign: { - if (IsOneShot(key)) { - size_t len; - if (!EVP_DigestSign( - context.get(), - nullptr, - &len, - params.data.data(), - params.data.size())) { - crypto::CheckThrow(env, SignBase::Error::kSignPrivateKey); - return false; - } - ByteSource::Builder buf(len); - if (!EVP_DigestSign(context.get(), - buf.data(), - &len, - params.data.data(), - params.data.size())) { - crypto::CheckThrow(env, SignBase::Error::kSignPrivateKey); + case SignConfiguration::Mode::Sign: { + if (key.isOneShotVariant()) { + auto data = context.signOneShot(params.data); + if (!data) [[unlikely]] { + crypto::CheckThrow(env, SignBase::Error::PrivateKey); return false; } - *out = std::move(buf).release(len); + *out = ByteSource::Allocated(data.release()); } else { - size_t len; - if (!EVP_DigestSignUpdate( - context.get(), - params.data.data(), - params.data.size()) || - !EVP_DigestSignFinal(context.get(), nullptr, &len)) { - crypto::CheckThrow(env, SignBase::Error::kSignPrivateKey); - return false; - } - ByteSource::Builder buf(len); - if (!EVP_DigestSignFinal( - context.get(), buf.data(), &len)) { - crypto::CheckThrow(env, SignBase::Error::kSignPrivateKey); + auto data = context.sign(params.data); + if (!data) [[unlikely]] { + crypto::CheckThrow(env, SignBase::Error::PrivateKey); return false; } + auto bs = ByteSource::Allocated(data.release()); if (UseP1363Encoding(key, params.dsa_encoding)) { - *out = ConvertSignatureToP1363(env, key, std::move(buf).release()); + *out = ConvertSignatureToP1363(env, key, std::move(bs)); } else { - *out = std::move(buf).release(len); + *out = std::move(bs); } } break; } - case SignConfiguration::kVerify: { + case SignConfiguration::Mode::Verify: { ByteSource::Builder buf(1); buf.data()[0] = 0; - if (EVP_DigestVerify( - context.get(), - params.signature.data(), - params.signature.size(), - params.data.data(), - params.data.size()) == 1) { + if (context.verify(params.data, params.signature)) { buf.data()[0] = 1; } *out = std::move(buf).release(); @@ -812,9 +716,9 @@ MaybeLocal SignTraits::EncodeOutput(Environment* env, const SignConfiguration& params, ByteSource* out) { switch (params.mode) { - case SignConfiguration::kSign: + case SignConfiguration::Mode::Sign: return out->ToArrayBuffer(env); - case SignConfiguration::kVerify: + case SignConfiguration::Mode::Verify: return Boolean::New(env->isolate(), out->data()[0] == 1); } UNREACHABLE(); diff --git a/src/crypto/crypto_sig.h b/src/crypto/crypto_sig.h index 3a3c27b4e8e748..36c51b07bb5692 100644 --- a/src/crypto/crypto_sig.h +++ b/src/crypto/crypto_sig.h @@ -13,27 +13,24 @@ namespace node { namespace crypto { static const unsigned int kNoDsaSignature = static_cast(-1); -enum DSASigEnc { - kSigEncDER, - kSigEncP1363 -}; +enum class DSASigEnc { DER, P1363, Invalid }; class SignBase : public BaseObject { public: - enum Error { - kSignOk, - kSignUnknownDigest, - kSignInit, - kSignNotInitialised, - kSignUpdate, - kSignPrivateKey, - kSignPublicKey, - kSignMalformedSignature + enum class Error { + Ok, + UnknownDigest, + Init, + NotInitialised, + Update, + PrivateKey, + PublicKey, + MalformedSignature }; SignBase(Environment* env, v8::Local wrap); - Error Init(const char* sign_type); + Error 
Init(std::string_view digest); Error Update(const char* data, size_t len); // TODO(joyeecheung): track the memory used by OpenSSL types @@ -45,7 +42,7 @@ class SignBase : public BaseObject { ncrypto::EVPMDCtxPointer mdctx_; }; -class Sign : public SignBase { +class Sign final : public SignBase { public: static void Initialize(Environment* env, v8::Local target); static void RegisterExternalReferences(ExternalReferenceRegistry* registry); @@ -54,15 +51,14 @@ class Sign : public SignBase { Error error; std::unique_ptr signature; - explicit SignResult( - Error err, - std::unique_ptr&& sig = nullptr) - : error(err), signature(std::move(sig)) {} + inline explicit SignResult( + Error err, std::unique_ptr&& sig = nullptr) + : error(err), signature(std::move(sig)) {} }; SignResult SignFinal(const ncrypto::EVPKeyPointer& pkey, int padding, - const v8::Maybe& saltlen, + std::optional saltlen, DSASigEnc dsa_sig_enc); static void SignSync(const v8::FunctionCallbackInfo& args); @@ -76,7 +72,7 @@ class Sign : public SignBase { Sign(Environment* env, v8::Local wrap); }; -class Verify : public SignBase { +class Verify final : public SignBase { public: static void Initialize(Environment* env, v8::Local target); static void RegisterExternalReferences(ExternalReferenceRegistry* registry); @@ -84,7 +80,7 @@ class Verify : public SignBase { Error VerifyFinal(const ncrypto::EVPKeyPointer& key, const ByteSource& sig, int padding, - const v8::Maybe& saltlen, + std::optional saltlen, bool* verify_result); static void VerifySync(const v8::FunctionCallbackInfo& args); @@ -99,10 +95,7 @@ class Verify : public SignBase { }; struct SignConfiguration final : public MemoryRetainer { - enum Mode { - kSign, - kVerify - }; + enum class Mode { Sign, Verify }; enum Flags { kHasNone = 0, kHasSaltLength = 1, @@ -118,7 +111,7 @@ struct SignConfiguration final : public MemoryRetainer { int flags = SignConfiguration::kHasNone; int padding = 0; int salt_length = 0; - DSASigEnc dsa_encoding = kSigEncDER; + DSASigEnc dsa_encoding = DSASigEnc::DER; SignConfiguration() = default; @@ -135,8 +128,6 @@ struct SignTraits final { using AdditionalParameters = SignConfiguration; static constexpr const char* JobName = "SignJob"; -// TODO(@jasnell): Sign request vs. Verify request - static constexpr AsyncWrap::ProviderType Provider = AsyncWrap::PROVIDER_SIGNREQUEST; diff --git a/src/crypto/crypto_tls.cc b/src/crypto/crypto_tls.cc index 0f1defef16661a..7104ee19a6dd79 100644 --- a/src/crypto/crypto_tls.cc +++ b/src/crypto/crypto_tls.cc @@ -46,6 +46,7 @@ using v8::Array; using v8::ArrayBuffer; using v8::ArrayBufferView; using v8::BackingStore; +using v8::BackingStoreInitializationMode; using v8::Boolean; using v8::Context; using v8::DontDelete; @@ -60,6 +61,7 @@ using v8::Isolate; using v8::Local; using v8::MaybeLocal; using v8::Null; +using v8::Number; using v8::Object; using v8::PropertyAttribute; using v8::ReadOnly; @@ -161,7 +163,7 @@ int NewSessionCallback(SSL* s, SSL_SESSION* sess) { HandleScope handle_scope(env->isolate()); Context::Scope context_scope(env->context()); - if (!w->has_session_callbacks()) + if (!w->has_session_callbacks()) [[unlikely]] return 0; // Check if session is small enough to be stored @@ -225,10 +227,9 @@ int SSLCertCallback(SSL* s, void* arg) { auto servername = SSLPointer::GetServerName(s); Local servername_str = - !servername.has_value() ? String::Empty(env->isolate()) - : OneByteString(env->isolate(), - servername.value().data(), - servername.value().length()); + !servername.has_value() + ? 
String::Empty(env->isolate()) + : OneByteString(env->isolate(), servername.value()); Local ocsp = Boolean::New( env->isolate(), SSL_get_tlsext_status_type(s) == TLSEXT_STATUSTYPE_ocsp); @@ -257,30 +258,27 @@ int SelectALPNCallback( Environment* env = w->env(); HandleScope handle_scope(env->isolate()); - Local callback_arg = - Buffer::Copy(env, reinterpret_cast(in), inlen) - .ToLocalChecked(); + Local callback_arg; + Local callback_result; - MaybeLocal maybe_callback_result = - w->MakeCallback(env->alpn_callback_string(), 1, &callback_arg); - - if (maybe_callback_result.IsEmpty()) [[unlikely]] { - // Implies the callback didn't return, because some exception was thrown - // during processing, e.g. if callback returned an invalid ALPN value. + if (!Buffer::Copy(env, reinterpret_cast(in), inlen) + .ToLocal(&callback_arg)) { return SSL_TLSEXT_ERR_ALERT_FATAL; } - Local callback_result = maybe_callback_result.ToLocalChecked(); + if (!w->MakeCallback(env->alpn_callback_string(), 1, &callback_arg) + .ToLocal(&callback_result)) { + return SSL_TLSEXT_ERR_ALERT_FATAL; + } - if (callback_result->IsUndefined()) { + if (callback_result->IsUndefined() && !callback_result->IsNumber()) { // If you set an ALPN callback, but you return undefined for an ALPN // request, you're rejecting all proposed ALPN protocols, and so we send // a fatal alert: return SSL_TLSEXT_ERR_ALERT_FATAL; } - CHECK(callback_result->IsNumber()); - unsigned int result_int = callback_result.As()->Value(); + unsigned int result_int = callback_result.As()->Value(); // The callback returns an offset into the given buffer, for the selected // protocol that should be returned. We then set outlen & out to point @@ -1087,10 +1085,10 @@ int TLSWrap::DoWrite(WriteWrap* w, // and copying it when it could just be used. 
if (nonempty_count != 1) { - { - NoArrayBufferZeroFillScope no_zero_fill_scope(env()->isolate_data()); - bs = ArrayBuffer::NewBackingStore(env()->isolate(), length); - } + bs = ArrayBuffer::NewBackingStore( + env()->isolate(), + length, + BackingStoreInitializationMode::kUninitialized); size_t offset = 0; for (i = 0; i < count; i++) { memcpy(static_cast(bs->Data()) + offset, @@ -1107,8 +1105,10 @@ int TLSWrap::DoWrite(WriteWrap* w, written = SSL_write(ssl_.get(), buf->base, buf->len); if (written == -1) { - NoArrayBufferZeroFillScope no_zero_fill_scope(env()->isolate_data()); - bs = ArrayBuffer::NewBackingStore(env()->isolate(), length); + bs = ArrayBuffer::NewBackingStore( + env()->isolate(), + length, + BackingStoreInitializationMode::kUninitialized); memcpy(bs->Data(), buf->base, buf->len); } } @@ -1750,11 +1750,8 @@ void TLSWrap::GetFinished(const FunctionCallbackInfo& args) { if (len == 0) return; - std::unique_ptr bs; - { - NoArrayBufferZeroFillScope no_zero_fill_scope(env->isolate_data()); - bs = ArrayBuffer::NewBackingStore(env->isolate(), len); - } + auto bs = ArrayBuffer::NewBackingStore( + env->isolate(), len, BackingStoreInitializationMode::kUninitialized); CHECK_EQ(bs->ByteLength(), SSL_get_finished(w->ssl_.get(), bs->Data(), bs->ByteLength())); @@ -1781,11 +1778,8 @@ void TLSWrap::GetPeerFinished(const FunctionCallbackInfo& args) { if (len == 0) return; - std::unique_ptr bs; - { - NoArrayBufferZeroFillScope no_zero_fill_scope(env->isolate_data()); - bs = ArrayBuffer::NewBackingStore(env->isolate(), len); - } + auto bs = ArrayBuffer::NewBackingStore( + env->isolate(), len, BackingStoreInitializationMode::kUninitialized); CHECK_EQ(bs->ByteLength(), SSL_get_peer_finished(w->ssl_.get(), bs->Data(), bs->ByteLength())); @@ -1810,11 +1804,8 @@ void TLSWrap::GetSession(const FunctionCallbackInfo& args) { if (slen <= 0) return; // Invalid or malformed session. 
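
Several hunks here drop NoArrayBufferZeroFillScope in favour of requesting an uninitialized allocation directly. A minimal helper sketch of that pattern (assuming a V8 header that exposes BackingStoreInitializationMode; the helper name is illustrative), where every byte is written before the buffer becomes observable from JavaScript:

    #include <cstddef>
    #include <cstring>
    #include "v8.h"

    v8::Local<v8::ArrayBuffer> CopyToArrayBuffer(v8::Isolate* isolate,
                                                 const void* src,
                                                 size_t len) {
      // Skip the default zero-fill; safe only because the memcpy below
      // overwrites the full allocation before it is handed to JS.
      auto bs = v8::ArrayBuffer::NewBackingStore(
          isolate, len, v8::BackingStoreInitializationMode::kUninitialized);
      std::memcpy(bs->Data(), src, len);
      return v8::ArrayBuffer::New(isolate, std::move(bs));
    }
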
- std::unique_ptr bs; - { - NoArrayBufferZeroFillScope no_zero_fill_scope(env->isolate_data()); - bs = ArrayBuffer::NewBackingStore(env->isolate(), slen); - } + auto bs = ArrayBuffer::NewBackingStore( + env->isolate(), slen, BackingStoreInitializationMode::kUninitialized); unsigned char* p = static_cast(bs->Data()); CHECK_LT(0, i2d_SSL_SESSION(sess, &p)); @@ -1997,11 +1988,8 @@ void TLSWrap::ExportKeyingMaterial(const FunctionCallbackInfo& args) { uint32_t olen = args[0].As()->Value(); Utf8Value label(env->isolate(), args[1]); - std::unique_ptr bs; - { - NoArrayBufferZeroFillScope no_zero_fill_scope(env->isolate_data()); - bs = ArrayBuffer::NewBackingStore(env->isolate(), olen); - } + auto bs = ArrayBuffer::NewBackingStore( + env->isolate(), olen, BackingStoreInitializationMode::kUninitialized); ByteSource context; bool use_context = !args[2]->IsUndefined(); diff --git a/src/crypto/crypto_tls.h b/src/crypto/crypto_tls.h index ed1ee337b42ec1..f02c37182ff189 100644 --- a/src/crypto/crypto_tls.h +++ b/src/crypto/crypto_tls.h @@ -58,15 +58,17 @@ class TLSWrap : public AsyncWrap, ~TLSWrap() override; - bool is_cert_cb_running() const { return cert_cb_running_; } - bool is_waiting_cert_cb() const { return cert_cb_ != nullptr; } - bool has_session_callbacks() const { return session_callbacks_; } - void set_cert_cb_running(bool on = true) { cert_cb_running_ = on; } - void set_awaiting_new_session(bool on = true) { awaiting_new_session_ = on; } - void enable_session_callbacks() { session_callbacks_ = true; } - bool is_server() const { return kind_ == Kind::kServer; } - bool is_client() const { return kind_ == Kind::kClient; } - bool is_awaiting_new_session() const { return awaiting_new_session_; } + inline bool is_cert_cb_running() const { return cert_cb_running_; } + inline bool is_waiting_cert_cb() const { return cert_cb_ != nullptr; } + inline bool has_session_callbacks() const { return session_callbacks_; } + inline void set_cert_cb_running(bool on = true) { cert_cb_running_ = on; } + inline void set_awaiting_new_session(bool on = true) { + awaiting_new_session_ = on; + } + inline void enable_session_callbacks() { session_callbacks_ = true; } + inline bool is_server() const { return kind_ == Kind::kServer; } + inline bool is_client() const { return kind_ == Kind::kClient; } + inline bool is_awaiting_new_session() const { return awaiting_new_session_; } // Implement StreamBase: bool IsAlive() override; @@ -128,7 +130,7 @@ class TLSWrap : public AsyncWrap, // Alternative to StreamListener::stream(), that returns a StreamBase instead // of a StreamResource. - StreamBase* underlying_stream() const { + inline StreamBase* underlying_stream() const { return static_cast(stream()); } diff --git a/src/crypto/crypto_util.cc b/src/crypto/crypto_util.cc index 60610b1b795c9b..61f8cbf6703b50 100644 --- a/src/crypto/crypto_util.cc +++ b/src/crypto/crypto_util.cc @@ -630,17 +630,12 @@ Maybe SetEncodedValue(Environment* env, : Nothing(); } -bool SetRsaOaepLabel(const EVPKeyCtxPointer& ctx, const ByteSource& label) { +bool SetRsaOaepLabel(EVPKeyCtxPointer* ctx, const ByteSource& label) { if (label.size() != 0) { // OpenSSL takes ownership of the label, so we need to create a copy. 
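// Additional context (based on OpenSSL's set0 ownership convention, not new behavior
// introduced here): EVP_PKEY_CTX_set0_rsa_oaep_label() takes ownership of the label
// buffer and frees it together with the context, while `label` is only a borrowed
// view; hence the owned duplicate that is moved into the context below.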
- void* label_copy = OPENSSL_memdup(label.data(), label.size()); - CHECK_NOT_NULL(label_copy); - int ret = EVP_PKEY_CTX_set0_rsa_oaep_label( - ctx.get(), static_cast(label_copy), label.size()); - if (ret <= 0) { - OPENSSL_free(label_copy); - return false; - } + auto dup = ncrypto::DataPointer::Copy(label); + if (!dup) return false; + return ctx->setRsaOaepLabel(std::move(dup)); } return true; } diff --git a/src/crypto/crypto_util.h b/src/crypto/crypto_util.h index a5967c7d24b836..e3c9a82c17b382 100644 --- a/src/crypto/crypto_util.h +++ b/src/crypto/crypto_util.h @@ -61,10 +61,9 @@ void InitCryptoOnce(); void InitCrypto(v8::Local target); -extern void UseExtraCaCerts(const std::string& file); +extern void UseExtraCaCerts(std::string_view file); int PasswordCallback(char* buf, int size, int rwflag, void* u); - int NoPasswordCallback(char* buf, int size, int rwflag, void* u); // Decode is used by the various stream-based crypto utilities to decode @@ -165,7 +164,7 @@ T* MallocOpenSSL(size_t count) { // A helper class representing a read-only byte array. When deallocated, its // contents are zeroed. -class ByteSource { +class ByteSource final { public: class Builder { public: @@ -224,17 +223,25 @@ class ByteSource { ByteSource& operator=(const ByteSource&) = delete; template - const T* data() const { + inline const T* data() const { return reinterpret_cast(data_); } - size_t size() const { return size_; } + template + operator ncrypto::Buffer() const { + return ncrypto::Buffer{ + .data = data(), + .len = size(), + }; + } + + inline size_t size() const { return size_; } - bool empty() const { return size_ == 0; } + inline bool empty() const { return size_ == 0; } - operator bool() const { return data_ != nullptr; } + inline operator bool() const { return data_ != nullptr; } - ncrypto::BignumPointer ToBN() const { + inline ncrypto::BignumPointer ToBN() const { return ncrypto::BignumPointer(data(), size()); } @@ -518,7 +525,7 @@ void ThrowCryptoError(Environment* env, unsigned long err, // NOLINT(runtime/int) const char* message = nullptr); -class CipherPushContext { +class CipherPushContext final { public: inline explicit CipherPushContext(Environment* env) : list_(env->isolate()), env_(env) {} @@ -546,16 +553,13 @@ void array_push_back(const TypeName* evp_ref, const char* from, const char* to, void* arg) { - if (!from) - return; + if (!from) return; const TypeName* real_instance = getbyname(from); - if (!real_instance) - return; + if (!real_instance) return; const char* real_name = getname(real_instance); - if (!real_name) - return; + if (!real_name) return; // EVP_*_fetch() does not support alias names, so we need to pass it the // real/original algorithm name. @@ -564,8 +568,7 @@ void array_push_back(const TypeName* evp_ref, // algorithms are used internally by OpenSSL and are also passed to this // callback). 
TypeName* fetched = fetch_type(nullptr, real_name, nullptr); - if (!fetched) - return; + if (!fetched) return; free_type(fetched); static_cast(arg)->push_back(from); @@ -576,8 +579,7 @@ void array_push_back(const TypeName* evp_ref, const char* from, const char* to, void* arg) { - if (!from) - return; + if (!from) return; static_cast(arg)->push_back(from); } #endif @@ -590,7 +592,7 @@ inline bool IsAnyBufferSource(v8::Local arg) { } template -class ArrayBufferOrViewContents { +class ArrayBufferOrViewContents final { public: ArrayBufferOrViewContents() = default; ArrayBufferOrViewContents(const ArrayBufferOrViewContents&) = delete; @@ -707,8 +709,7 @@ v8::Maybe SetEncodedValue(Environment* env, const BIGNUM* bn, int size = 0); -bool SetRsaOaepLabel(const ncrypto::EVPKeyCtxPointer& rsa, - const ByteSource& label); +bool SetRsaOaepLabel(ncrypto::EVPKeyCtxPointer* rsa, const ByteSource& label); namespace Util { void Initialize(Environment* env, v8::Local target); diff --git a/src/crypto/crypto_x509.cc b/src/crypto/crypto_x509.cc index 3465454e4de4a7..782eced277518d 100644 --- a/src/crypto/crypto_x509.cc +++ b/src/crypto/crypto_x509.cc @@ -21,13 +21,12 @@ using ncrypto::ClearErrorOnReturn; using ncrypto::DataPointer; using ncrypto::ECKeyPointer; using ncrypto::SSLPointer; -using ncrypto::StackOfASN1; using ncrypto::X509Pointer; using ncrypto::X509View; using v8::Array; using v8::ArrayBuffer; using v8::ArrayBufferView; -using v8::BackingStore; +using v8::BackingStoreInitializationMode; using v8::Boolean; using v8::Context; using v8::Date; @@ -38,6 +37,7 @@ using v8::FunctionTemplate; using v8::Integer; using v8::Isolate; using v8::Local; +using v8::LocalVector; using v8::MaybeLocal; using v8::NewStringType; using v8::Object; @@ -55,14 +55,13 @@ ManagedX509::ManagedX509(const ManagedX509& that) { ManagedX509& ManagedX509::operator=(const ManagedX509& that) { cert_.reset(that.get()); - - if (cert_) + if (cert_) [[likely]] X509_up_ref(cert_.get()); - return *this; } void ManagedX509::MemoryInfo(MemoryTracker* tracker) const { + if (!cert_) return; // This is an approximation based on the der encoding size. int size = i2d_X509(cert_.get(), nullptr); tracker->TrackFieldWithSize("cert", size); @@ -94,7 +93,8 @@ void Fingerprint(const FunctionCallbackInfo& args) { } MaybeLocal ToV8Value(Local context, BIOPointer&& bio) { - if (!bio) return {}; + if (!bio) [[unlikely]] + return {}; BUF_MEM* mem = bio; Local ret; if (!String::NewFromUtf8(context->GetIsolate(), @@ -135,7 +135,7 @@ MaybeLocal ToV8Value(Local context, const ASN1_STRING* str) { // not escape anything. 
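// Illustrative note on the calls that follow (standard OpenSSL behavior, assumed
// rather than changed by this patch): ASN1_STRING_to_UTF8() allocates the UTF-8
// buffer with OpenSSL's allocator and returns its length, or a negative value on
// failure; wrapping the result in a DataPointer immediately afterwards is what
// guarantees the buffer is released on every return path.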
unsigned char* value_str; int value_str_size = ASN1_STRING_to_UTF8(&value_str, str); - if (value_str_size < 0) { + if (value_str_size < 0) [[unlikely]] { return Undefined(context->GetIsolate()); } DataPointer free_value_str(value_str, value_str_size); @@ -152,7 +152,8 @@ MaybeLocal ToV8Value(Local context, const ASN1_STRING* str) { } MaybeLocal ToV8Value(Local context, const BIOPointer& bio) { - if (!bio) return {}; + if (!bio) [[unlikely]] + return {}; BUF_MEM* mem = bio; Local ret; if (!String::NewFromUtf8(context->GetIsolate(), @@ -165,7 +166,8 @@ MaybeLocal ToV8Value(Local context, const BIOPointer& bio) { } MaybeLocal ToBuffer(Environment* env, BIOPointer* bio) { - if (bio == nullptr || !*bio) return {}; + if (bio == nullptr || !*bio) [[unlikely]] + return {}; BUF_MEM* mem = *bio; auto backing = ArrayBuffer::NewBackingStore( mem->data, @@ -183,7 +185,8 @@ MaybeLocal ToBuffer(Environment* env, BIOPointer* bio) { MaybeLocal GetDer(Environment* env, const X509View& view) { Local ret; auto bio = view.toDER(); - if (!bio) return Undefined(env->isolate()); + if (!bio) [[unlikely]] + return Undefined(env->isolate()); if (!ToBuffer(env, &bio).ToLocal(&ret)) { return {}; } @@ -194,7 +197,8 @@ MaybeLocal GetSubjectAltNameString(Environment* env, const X509View& view) { Local ret; auto bio = view.getSubjectAltName(); - if (!bio) return Undefined(env->isolate()); + if (!bio) [[unlikely]] + return Undefined(env->isolate()); if (!ToV8Value(env->context(), bio).ToLocal(&ret)) return {}; return ret; } @@ -202,7 +206,8 @@ MaybeLocal GetSubjectAltNameString(Environment* env, MaybeLocal GetInfoAccessString(Environment* env, const X509View& view) { Local ret; auto bio = view.getInfoAccess(); - if (!bio) return Undefined(env->isolate()); + if (!bio) [[unlikely]] + return Undefined(env->isolate()); if (!ToV8Value(env->context(), bio).ToLocal(&ret)) { return {}; } @@ -212,7 +217,8 @@ MaybeLocal GetInfoAccessString(Environment* env, const X509View& view) { MaybeLocal GetValidFrom(Environment* env, const X509View& view) { Local ret; auto bio = view.getValidFrom(); - if (!bio) return Undefined(env->isolate()); + if (!bio) [[unlikely]] + return Undefined(env->isolate()); if (!ToV8Value(env->context(), bio).ToLocal(&ret)) { return {}; } @@ -222,7 +228,8 @@ MaybeLocal GetValidFrom(Environment* env, const X509View& view) { MaybeLocal GetValidTo(Environment* env, const X509View& view) { Local ret; auto bio = view.getValidTo(); - if (!bio) return Undefined(env->isolate()); + if (!bio) [[unlikely]] + return Undefined(env->isolate()); if (!ToV8Value(env->context(), bio).ToLocal(&ret)) { return {}; } @@ -248,25 +255,12 @@ MaybeLocal GetSerialNumber(Environment* env, const X509View& view) { } MaybeLocal GetKeyUsage(Environment* env, const X509View& cert) { - StackOfASN1 eku(static_cast( - X509_get_ext_d2i(cert.get(), NID_ext_key_usage, nullptr, nullptr))); - if (eku) { - const int count = sk_ASN1_OBJECT_num(eku.get()); - MaybeStackBuffer, 16> ext_key_usage(count); - char buf[256]; - - int j = 0; - for (int i = 0; i < count; i++) { - if (OBJ_obj2txt( - buf, sizeof(buf), sk_ASN1_OBJECT_value(eku.get(), i), 1) >= 0) { - ext_key_usage[j++] = OneByteString(env->isolate(), buf); - } - } - - return Array::New(env->isolate(), ext_key_usage.out(), count); - } - - return Undefined(env->isolate()); + LocalVector vec(env->isolate()); + bool res = cert.enumUsages([&](std::string_view view) { + vec.push_back(OneByteString(env->isolate(), view)); + }); + if (!res) return Undefined(env->isolate()); + return Array::New(env->isolate(), 
vec.data(), vec.size()); } void Pem(const FunctionCallbackInfo& args) { @@ -387,7 +381,7 @@ void PublicKey(const FunctionCallbackInfo& args) { // TODO(tniessen): consider checking X509_get_pubkey() when the // X509Certificate object is being created. auto result = cert->view().getPublicKey(); - if (!result.value) { + if (!result.value) [[unlikely]] { ThrowCryptoError(env, result.error.value_or(0)); return; } @@ -547,7 +541,9 @@ void Parse(const FunctionCallbackInfo& args) { .len = buf.length(), }); - if (!result.value) return ThrowCryptoError(env, result.error.value_or(0)); + if (!result.value) [[unlikely]] { + return ThrowCryptoError(env, result.error.value_or(0)); + } if (X509Certificate::New(env, std::move(result.value)).ToLocal(&cert)) { args.GetReturnValue().Set(cert); @@ -571,7 +567,8 @@ bool Set(Environment* env, Local name, MaybeLocal maybe_value) { Local value; - if (!maybe_value.ToLocal(&value)) return false; + if (!maybe_value.ToLocal(&value)) [[unlikely]] + return false; // Undefined is ignored, but still considered successful if (value->IsUndefined()) return true; @@ -585,7 +582,8 @@ bool Set(Environment* env, uint32_t index, MaybeLocal maybe_value) { Local value; - if (!maybe_value.ToLocal(&value)) return false; + if (!maybe_value.ToLocal(&value)) [[unlikely]] + return false; // Undefined is ignored, but still considered successful if (value->IsUndefined()) return true; @@ -664,33 +662,32 @@ static MaybeLocal GetX509NameObject(Environment* env, return result; } -MaybeLocal GetPubKey(Environment* env, OSSL3_CONST RSA* rsa) { +MaybeLocal GetPubKey(Environment* env, const ncrypto::Rsa& rsa) { int size = i2d_RSA_PUBKEY(rsa, nullptr); CHECK_GE(size, 0); - std::unique_ptr bs; - { - NoArrayBufferZeroFillScope no_zero_fill_scope(env->isolate_data()); - bs = ArrayBuffer::NewBackingStore(env->isolate(), size); - } + auto bs = ArrayBuffer::NewBackingStore( + env->isolate(), size, BackingStoreInitializationMode::kUninitialized); - unsigned char* serialized = reinterpret_cast(bs->Data()); + auto serialized = reinterpret_cast(bs->Data()); CHECK_GE(i2d_RSA_PUBKEY(rsa, &serialized), 0); - Local ab = ArrayBuffer::New(env->isolate(), std::move(bs)); + auto ab = ArrayBuffer::New(env->isolate(), std::move(bs)); return Buffer::New(env, ab, 0, ab->ByteLength()).FromMaybe(Local()); } MaybeLocal GetModulusString(Environment* env, const BIGNUM* n) { auto bio = BIOPointer::New(n); - if (!bio) return {}; + if (!bio) [[unlikely]] + return {}; return ToV8Value(env->context(), bio); } MaybeLocal GetExponentString(Environment* env, const BIGNUM* e) { uint64_t exponent_word = static_cast(BignumPointer::GetWord(e)); auto bio = BIOPointer::NewMem(); - if (!bio) return {}; + if (!bio) [[unlikely]] + return {}; BIO_printf(bio.get(), "0x%" PRIx64, exponent_word); return ToV8Value(env->context(), bio); } @@ -699,14 +696,16 @@ MaybeLocal GetECPubKey(Environment* env, const EC_GROUP* group, OSSL3_CONST EC_KEY* ec) { const auto pubkey = ECKeyPointer::GetPublicKey(ec); - if (pubkey == nullptr) return Undefined(env->isolate()); + if (pubkey == nullptr) [[unlikely]] + return Undefined(env->isolate()); return ECPointToBuffer(env, group, pubkey, EC_KEY_get_conv_form(ec), nullptr) .FromMaybe(Local()); } MaybeLocal GetECGroupBits(Environment* env, const EC_GROUP* group) { - if (group == nullptr) return Undefined(env->isolate()); + if (group == nullptr) [[unlikely]] + return Undefined(env->isolate()); int bits = EC_GROUP_order_bits(group); if (bits <= 0) return Undefined(env->isolate()); @@ -716,10 +715,9 @@ MaybeLocal 
GetECGroupBits(Environment* env, const EC_GROUP* group) { template MaybeLocal GetCurveName(Environment* env, const int nid) { - const char* name = nid2string(nid); - return name != nullptr - ? MaybeLocal(OneByteString(env->isolate(), name)) - : MaybeLocal(Undefined(env->isolate())); + std::string_view name = nid2string(nid); + return name.size() ? MaybeLocal(OneByteString(env->isolate(), name)) + : MaybeLocal(Undefined(env->isolate())); } MaybeLocal X509ToObject(Environment* env, const X509View& cert) { @@ -745,68 +743,68 @@ MaybeLocal X509ToObject(Environment* env, const X509View& cert) { !Set(env, info, env->ca_string(), - Boolean::New(env->isolate(), cert.isCA()))) { + Boolean::New(env->isolate(), cert.isCA()))) [[unlikely]] { return {}; } - OSSL3_CONST EVP_PKEY* pkey = X509_get0_pubkey(cert.get()); - OSSL3_CONST RSA* rsa = nullptr; - OSSL3_CONST EC_KEY* ec = nullptr; - if (pkey != nullptr) { - switch (EVP_PKEY_id(pkey)) { - case EVP_PKEY_RSA: - rsa = EVP_PKEY_get0_RSA(pkey); - break; - case EVP_PKEY_EC: - ec = EVP_PKEY_get0_EC_KEY(pkey); - break; - } + if (!cert.ifRsa([&](const ncrypto::Rsa& rsa) { + auto pub_key = rsa.getPublicKey(); + if (!Set(env, + info, + env->modulus_string(), + GetModulusString(env, pub_key.n)) || + !Set(env, + info, + env->bits_string(), + Integer::New(env->isolate(), + BignumPointer::GetBitCount(pub_key.n))) || + !Set(env, + info, + env->exponent_string(), + GetExponentString(env, pub_key.e)) || + !Set(env, info, env->pubkey_string(), GetPubKey(env, rsa))) + [[unlikely]] { + return false; + } + return true; + })) [[unlikely]] { + return {}; } - if (rsa) { - const BIGNUM* n; - const BIGNUM* e; - RSA_get0_key(rsa, &n, &e, nullptr); - if (!Set( - env, info, env->modulus_string(), GetModulusString(env, n)) || - !Set( - env, - info, - env->bits_string(), - Integer::New(env->isolate(), BignumPointer::GetBitCount(n))) || - !Set( - env, info, env->exponent_string(), GetExponentString(env, e)) || - !Set(env, info, env->pubkey_string(), GetPubKey(env, rsa))) { - return {}; - } - } else if (ec) { - const auto group = ECKeyPointer::GetGroup(ec); + if (!cert.ifEc([&](const ncrypto::Ec& ec) { + const auto group = ec.getGroup(); - if (!Set( - env, info, env->bits_string(), GetECGroupBits(env, group)) || - !Set( - env, info, env->pubkey_string(), GetECPubKey(env, group, ec))) { - return {}; - } + if (!Set( + env, info, env->bits_string(), GetECGroupBits(env, group)) || + !Set( + env, info, env->pubkey_string(), GetECPubKey(env, group, ec))) + [[unlikely]] { + return false; + } - const int nid = EC_GROUP_get_curve_name(group); - if (nid != 0) { - // Curve is well-known, get its OID and NIST nick-name (if it has one). - - if (!Set(env, - info, - env->asn1curve_string(), - GetCurveName(env, nid)) || - !Set(env, - info, - env->nistcurve_string(), - GetCurveName(env, nid))) { - return {}; - } - } else { - // Unnamed curves can be described by their mathematical properties, - // but aren't used much (at all?) with X.509/TLS. Support later if needed. - } + const int nid = ec.getCurve(); + if (nid != 0) [[likely]] { + // Curve is well-known, get its OID and NIST nick-name (if it has + // one). + + if (!Set(env, + info, + env->asn1curve_string(), + GetCurveName(env, nid)) || + !Set(env, + info, + env->nistcurve_string(), + GetCurveName(env, nid))) + [[unlikely]] { + return false; + } + } + // Unnamed curves can be described by their mathematical properties, + // but aren't used much (at all?) with X.509/TLS. Support later if + // needed. 
+ return true; + })) [[unlikely]] { + return {}; } if (!Set( @@ -828,7 +826,8 @@ MaybeLocal X509ToObject(Environment* env, const X509View& cert) { env, info, env->ext_key_usage_string(), GetKeyUsage(env, cert)) || !Set( env, info, env->serial_number_string(), GetSerialNumber(env, cert)) || - !Set(env, info, env->raw_string(), GetDer(env, cert))) { + !Set(env, info, env->raw_string(), GetDer(env, cert))) + [[unlikely]] { return {}; } @@ -910,7 +909,8 @@ MaybeLocal X509Certificate::New(Environment* env, MaybeLocal X509Certificate::GetCert(Environment* env, const SSLPointer& ssl) { auto cert = X509View::From(ssl); - if (!cert) return {}; + if (!cert) [[unlikely]] + return {}; return New(env, cert.clone()); } @@ -930,7 +930,7 @@ MaybeLocal X509Certificate::GetPeerCert(Environment* env, if (!cert && (ssl_certs == nullptr || sk_X509_num(ssl_certs) == 0)) return MaybeLocal(); - if (!cert) { + if (!cert) [[unlikely]] { cert.reset(sk_X509_value(ssl_certs, 0)); sk_X509_delete(ssl_certs, 0); } @@ -945,7 +945,8 @@ v8::MaybeLocal X509Certificate::toObject(Environment* env) { v8::MaybeLocal X509Certificate::toObject(Environment* env, const X509View& cert) { - if (!cert) return {}; + if (!cert) [[unlikely]] + return {}; return X509ToObject(env, cert).FromMaybe(Local()); } @@ -979,7 +980,7 @@ X509Certificate::X509CertificateTransferData::Deserialize( Environment* env, Local context, std::unique_ptr self) { - if (context != env->context()) { + if (context != env->context()) [[unlikely]] { THROW_ERR_MESSAGE_TARGET_CONTEXT_UNAVAILABLE(env); return {}; } diff --git a/src/crypto/crypto_x509.h b/src/crypto/crypto_x509.h index 54f4b2a40732d2..39f1878be1925f 100644 --- a/src/crypto/crypto_x509.h +++ b/src/crypto/crypto_x509.h @@ -25,10 +25,10 @@ class ManagedX509 final : public MemoryRetainer { ManagedX509(const ManagedX509& that); ManagedX509& operator=(const ManagedX509& that); - operator bool() const { return !!cert_; } - X509* get() const { return cert_.get(); } - ncrypto::X509View view() const { return cert_; } - operator ncrypto::X509View() const { return cert_; } + inline operator bool() const { return !!cert_; } + inline X509* get() const { return cert_.get(); } + inline ncrypto::X509View view() const { return cert_; } + inline operator ncrypto::X509View() const { return cert_; } void MemoryInfo(MemoryTracker* tracker) const override; SET_MEMORY_INFO_NAME(ManagedX509) @@ -77,7 +77,7 @@ class X509Certificate final : public BaseObject { } inline ncrypto::X509View view() const { return *cert_; } - X509* get() { return cert_->get(); } + inline X509* get() { return cert_->get(); } v8::MaybeLocal toObject(Environment* env); static v8::MaybeLocal toObject(Environment* env, From 575251ae6a9f0dd25b0198b8ccd41e4d368c3a8a Mon Sep 17 00:00:00 2001 From: Burkov Egor Date: Wed, 22 Jan 2025 16:16:31 +0300 Subject: [PATCH 140/158] src: add nullptr handling from X509_STORE_new() In openssl we should check result of X509_STORE_new() for nullptr Refs: https://github.com/nodejs/node/issues/56694 PR-URL: https://github.com/nodejs/node/pull/56700 Reviewed-By: James M Snell Reviewed-By: Luigi Pinca --- src/crypto/crypto_context.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/src/crypto/crypto_context.cc b/src/crypto/crypto_context.cc index da5cebb87a3d51..af7ac2e5e8eba9 100644 --- a/src/crypto/crypto_context.cc +++ b/src/crypto/crypto_context.cc @@ -272,6 +272,7 @@ X509_STORE* NewRootCertStore() { } X509_STORE* store = X509_STORE_new(); + CHECK_NOT_NULL(store); if (*system_cert_path != '\0') { 
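// Regarding the CHECK_NOT_NULL added above: X509_STORE_new() returns nullptr on
// allocation failure, and CHECK_NOT_NULL aborts the process in that case. A
// hypothetical error-propagating alternative (not what this patch does) would be:
//
//   X509_STORE* store = X509_STORE_new();
//   if (store == nullptr) return nullptr;  // let callers surface the OpenSSL error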
ERR_set_mark(); X509_STORE_load_locations(store, system_cert_path, nullptr); From 85f7bbf4e4fbdfe1817b7c9f0dcee11c5ae0c134 Mon Sep 17 00:00:00 2001 From: Luigi Pinca Date: Sun, 26 Jan 2025 17:41:21 +0100 Subject: [PATCH 141/158] test: do not use common.isMainThread MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit `common.isMainThread` was removed in https://github.com/nodejs/node/commit/8caa1dcee63b2c6fd7a9, use the `isMainThread` export of the `worker_threads` module instead. PR-URL: https://github.com/nodejs/node/pull/56768 Reviewed-By: Michaël Zasso Reviewed-By: James M Snell Reviewed-By: Chengzhong Wu Reviewed-By: Matteo Collina --- test/parallel/test-require-resolve-opts-paths-relative.js | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/parallel/test-require-resolve-opts-paths-relative.js b/test/parallel/test-require-resolve-opts-paths-relative.js index 522a1fdbce82a4..13d17d478b753d 100644 --- a/test/parallel/test-require-resolve-opts-paths-relative.js +++ b/test/parallel/test-require-resolve-opts-paths-relative.js @@ -3,8 +3,9 @@ const common = require('../common'); const assert = require('assert'); const fixtures = require('../common/fixtures'); +const { isMainThread } = require('worker_threads'); -if (!common.isMainThread) +if (!isMainThread) common.skip('process.chdir is not available in Workers'); const subdir = fixtures.path('module-require', 'relative', 'subdir'); From e49f3e944c665fcad2d227459190918d04e1c01a Mon Sep 17 00:00:00 2001 From: James M Snell Date: Sun, 26 Jan 2025 09:19:39 -0800 Subject: [PATCH 142/158] test: cleanup and simplify test-crypto-aes-wrap * Add comment explaining purpose of the test * Eliminate duplicative/extraneous buffer allocations PR-URL: https://github.com/nodejs/node/pull/56748 Reviewed-By: Yagiz Nizipli Reviewed-By: Richard Lau Reviewed-By: Luigi Pinca Reviewed-By: Matteo Collina --- test/parallel/test-crypto-aes-wrap.js | 58 +++++++++++++-------------- 1 file changed, 28 insertions(+), 30 deletions(-) diff --git a/test/parallel/test-crypto-aes-wrap.js b/test/parallel/test-crypto-aes-wrap.js index 6fe35258f7d6b2..21d48d8a3fbae7 100644 --- a/test/parallel/test-crypto-aes-wrap.js +++ b/test/parallel/test-crypto-aes-wrap.js @@ -1,62 +1,60 @@ 'use strict'; const common = require('../common'); -if (!common.hasCrypto) +if (!common.hasCrypto) { common.skip('missing crypto'); +} + +// Tests that the AES wrap and unwrap functions are working correctly. 
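// Background for the cases below: 'aes*-wrap' implements AES Key Wrap (RFC 3394),
// which expects an 8-byte IV and plaintext whose length is a multiple of 8 bytes,
// while 'id-aes*-wrap-pad' is the padded variant (RFC 5649), which uses a 4-byte IV
// and accepts arbitrary plaintext lengths. A minimal sketch of the round trip being
// verified (it mirrors the test body below rather than adding coverage):
//
//   const wrapped = crypto.createCipheriv('aes128-wrap', key, iv).update(keyData);
//   const unwrapped = crypto.createDecipheriv('aes128-wrap', key, iv).update(wrapped);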
const assert = require('assert'); const crypto = require('crypto'); -const test = [ +const ivShort = Buffer.from('3fd838af', 'hex'); +const ivLong = Buffer.from('3fd838af4093d749', 'hex'); +const key1 = Buffer.from('b26f309fbe57e9b3bb6ae5ef31d54450', 'hex'); +const key2 = Buffer.from('40978085d68091f7dfca0d7dfc7a5ee76d2cc7f2f345a304', 'hex'); +const key3 = Buffer.from('29c9eab5ed5ad44134a1437fe2e673b4d88a5b7c72e68454fea08721392b7323', 'hex'); + +[ { algorithm: 'aes128-wrap', - key: 'b26f309fbe57e9b3bb6ae5ef31d54450', - iv: '3fd838af4093d749', + key: key1, + iv: ivLong, text: '12345678123456781234567812345678' }, { algorithm: 'id-aes128-wrap-pad', - key: 'b26f309fbe57e9b3bb6ae5ef31d54450', - iv: '3fd838af', + key: key1, + iv: ivShort, text: '12345678123456781234567812345678123' }, { algorithm: 'aes192-wrap', - key: '40978085d68091f7dfca0d7dfc7a5ee76d2cc7f2f345a304', - iv: '3fd838af4093d749', + key: key2, + iv: ivLong, text: '12345678123456781234567812345678' }, { algorithm: 'id-aes192-wrap-pad', - key: '40978085d68091f7dfca0d7dfc7a5ee76d2cc7f2f345a304', - iv: '3fd838af', + key: key2, + iv: ivShort, text: '12345678123456781234567812345678123' }, { algorithm: 'aes256-wrap', - key: '29c9eab5ed5ad44134a1437fe2e673b4d88a5b7c72e68454fea08721392b7323', - iv: '3fd838af4093d749', + key: key3, + iv: ivLong, text: '12345678123456781234567812345678' }, { algorithm: 'id-aes256-wrap-pad', - key: '29c9eab5ed5ad44134a1437fe2e673b4d88a5b7c72e68454fea08721392b7323', - iv: '3fd838af', + key: key3, + iv: ivShort, text: '12345678123456781234567812345678123' }, -]; - -test.forEach((data) => { - const cipher = crypto.createCipheriv( - data.algorithm, - Buffer.from(data.key, 'hex'), - Buffer.from(data.iv, 'hex')); - const ciphertext = cipher.update(data.text, 'utf8'); - - const decipher = crypto.createDecipheriv( - data.algorithm, - Buffer.from(data.key, 'hex'), - Buffer.from(data.iv, 'hex')); - const msg = decipher.update(ciphertext, 'buffer', 'utf8'); - - assert.strictEqual(msg, data.text, `${data.algorithm} test case failed`); +].forEach(({ algorithm, key, iv, text }) => { + const cipher = crypto.createCipheriv(algorithm, key, iv); + const decipher = crypto.createDecipheriv(algorithm, key, iv); + const msg = decipher.update(cipher.update(text, 'utf8'), 'buffer', 'utf8'); + assert.strictEqual(msg, text, `${algorithm} test case failed`); }); From ad012ca1f3b84639fb081882923def33f54990a2 Mon Sep 17 00:00:00 2001 From: Antoine du Hamel Date: Sun, 26 Jan 2025 19:31:35 +0100 Subject: [PATCH 143/158] doc: improve accessibility of expandable lists MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PR-URL: https://github.com/nodejs/node/pull/56749 Reviewed-By: James M Snell Reviewed-By: Ulises Gascón Reviewed-By: Claudio Wunder --- doc/api_assets/api.js | 6 +++++- doc/api_assets/style.css | 17 ++++++----------- doc/template.html | 6 +++--- tools/doc/html.mjs | 12 ++++++------ 4 files changed, 20 insertions(+), 21 deletions(-) diff --git a/doc/api_assets/api.js b/doc/api_assets/api.js index 394b5ba990946c..e86f110e0346bf 100644 --- a/doc/api_assets/api.js +++ b/doc/api_assets/api.js @@ -41,6 +41,7 @@ function closeAllPickers() { for (const picker of pickers) { picker.parentNode.classList.remove('expanded'); + picker.ariaExpanded = false; } window.removeEventListener('click', closeAllPickers); @@ -58,6 +59,7 @@ for (const picker of pickers) { const parentNode = picker.parentNode; + picker.ariaExpanded = parentNode.classList.contains('expanded'); 
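// Note on the reflected ARIA property used here and in the click handler below:
// `picker.ariaExpanded` reflects the `aria-expanded` attribute as a string, so
// assigning a boolean stores 'true' or 'false' and later reads compare against the
// string 'true' rather than a boolean. An equivalent using attributes directly would
// be `picker.setAttribute('aria-expanded', String(expanded))` (illustrative only).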
picker.addEventListener('click', function(e) { e.preventDefault(); @@ -65,7 +67,7 @@ closeAllPickers as window event trigger already closed all the pickers, if it already closed there is nothing else to do here */ - if (parentNode.classList.contains('expanded')) { + if (picker.ariaExpanded === 'true') { return; } @@ -75,9 +77,11 @@ */ requestAnimationFrame(function() { + picker.ariaExpanded = true; parentNode.classList.add('expanded'); window.addEventListener('click', closeAllPickers); window.addEventListener('keydown', onKeyDown); + parentNode.querySelector('.picker a').focus(); }); }); } diff --git a/doc/api_assets/style.css b/doc/api_assets/style.css index 28a284e3b975b8..a40990a39252a4 100644 --- a/doc/api_assets/style.css +++ b/doc/api_assets/style.css @@ -182,22 +182,15 @@ li.picker-header .picker-arrow { height: .6rem; border-top: .3rem solid transparent; border-bottom: .3rem solid transparent; - border-left: .6rem solid var(--color-links); + border-left: .6rem solid currentColor; border-right: none; margin: 0 .2rem .05rem 0; } -li.picker-header a:focus .picker-arrow, -li.picker-header a:active .picker-arrow, -li.picker-header a:hover .picker-arrow { - border-left: .6rem solid var(--white); -} - -li.picker-header.expanded a:focus .picker-arrow, -li.picker-header.expanded a:active .picker-arrow, -li.picker-header.expanded a:hover .picker-arrow, +li.picker-header.expanded .picker-arrow, +:root:not(.has-js) li.picker-header:focus-within .picker-arrow, :root:not(.has-js) li.picker-header:hover .picker-arrow { - border-top: .6rem solid var(--white); + border-top: .6rem solid currentColor; border-bottom: none; border-left: .35rem solid transparent; border-right: .35rem solid transparent; @@ -205,11 +198,13 @@ li.picker-header.expanded a:hover .picker-arrow, } li.picker-header.expanded > a, +:root:not(.has-js) li.picker-header:focus-within > a, :root:not(.has-js) li.picker-header:hover > a { border-radius: 2px 2px 0 0; } li.picker-header.expanded > .picker, +:root:not(.has-js) li.picker-header:focus-within > .picker, :root:not(.has-js) li.picker-header:hover > .picker { display: block; z-index: 1; diff --git a/doc/template.html b/doc/template.html index ab8be0e747f492..51e789b7e6168c 100644 --- a/doc/template.html +++ b/doc/template.html @@ -59,13 +59,13 @@

Node.js __VERSION__ documentation

__GTOC_PICKER__ __ALTDOCS__
  • - + Options -
    -
      +
      +
      • View on single page
      • diff --git a/tools/doc/html.mjs b/tools/doc/html.mjs index 68762d89e048ce..d61d335c7b8957 100644 --- a/tools/doc/html.mjs +++ b/tools/doc/html.mjs @@ -527,11 +527,11 @@ function altDocs(filename, docCreated, versions) { return list ? `
      • - + Other versions -
          ${list}
        +
          ${list}
      • ` : ''; } @@ -557,12 +557,12 @@ function gtocPicker(id) { return `
      • - + Index -
        ${gtoc}
        +
        ${gtoc}
      • `; } @@ -574,12 +574,12 @@ function tocPicker(id, content) { return `
      • - + Table of contents -
        ${content.tocPicker}
        +
        ${content.tocPicker.replace('
      • `; } From 09fb3adf80ba4c42dc33f51c8333d812d5fbfcc0 Mon Sep 17 00:00:00 2001 From: Antoine du Hamel Date: Sun, 26 Jan 2025 19:41:45 +0100 Subject: [PATCH 144/158] doc: add "Skip to content" button MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PR-URL: https://github.com/nodejs/node/pull/56750 Reviewed-By: James M Snell Reviewed-By: Ulises Gascón Reviewed-By: Claudio Wunder --- doc/api_assets/style.css | 13 +++++++++++++ doc/template.html | 1 + 2 files changed, 14 insertions(+) diff --git a/doc/api_assets/style.css b/doc/api_assets/style.css index a40990a39252a4..35c216bb0523fc 100644 --- a/doc/api_assets/style.css +++ b/doc/api_assets/style.css @@ -122,6 +122,19 @@ a.type { font-size: .9em; } +.skip-to-content { + position: fixed; + top: -300%; +} +.skip-to-content:focus { + display: block; + top: 0; + left: 0; + background-color: var(--green1); + padding: 1rem; + z-index: 999999; +} + #content { position: relative; } diff --git a/doc/template.html b/doc/template.html index 51e789b7e6168c..34edf068df5c8d 100644 --- a/doc/template.html +++ b/doc/template.html @@ -26,6 +26,7 @@ __JS_FLAVORED_DYNAMIC_CSS__ + Skip to content
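<!--
  A minimal sketch of how the new link pairs with the stylesheet rules above; the
  target id here is an assumption for illustration and may differ from the real
  template:

    <a class="skip-to-content" href="#apicontent">Skip to content</a>
    ...
    <div id="apicontent"> rendered API documentation </div>

  The .skip-to-content rules keep the link far off-screen until it receives keyboard
  focus, at which point the :focus rule pins it to the top-left corner above all
  other content.
-->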