diff --git a/src/node_platform.cc b/src/node_platform.cc
index 9787cbb3edc2e2..532fd03b4a0c14 100644
--- a/src/node_platform.cc
+++ b/src/node_platform.cc
@@ -253,7 +253,9 @@ void PerIsolatePlatformData::PostTask(std::unique_ptr<Task> task) {
 }
 
 void PerIsolatePlatformData::PostDelayedTask(
-    std::unique_ptr<Task> task, double delay_in_seconds) {
+    std::unique_ptr<Task> task,
+    double delay_in_seconds,
+    DelayedTask::Nestability nestability) {
   if (flush_tasks_ == nullptr) {
     // V8 may post tasks during Isolate disposal. In that case, the only
     // sensible path forward is to discard the task.
@@ -263,10 +265,17 @@ void PerIsolatePlatformData::PostDelayedTask(
   delayed->task = std::move(task);
   delayed->platform_data = shared_from_this();
   delayed->timeout = delay_in_seconds;
+  delayed->nestability = nestability;
   foreground_delayed_tasks_.Push(std::move(delayed));
   uv_async_send(flush_tasks_);
 }
 
+void PerIsolatePlatformData::PostDelayedTask(std::unique_ptr<Task> task,
+                                             double delay_in_seconds) {
+  PostDelayedTask(
+      std::move(task), delay_in_seconds, DelayedTask::Nestability::kNestable);
+}
+
 void PerIsolatePlatformData::PostNonNestableTask(std::unique_ptr<Task> task) {
   PostTask(std::move(task));
 }
@@ -274,7 +283,9 @@ void PerIsolatePlatformData::PostNonNestableTask(std::unique_ptr<Task> task) {
 void PerIsolatePlatformData::PostNonNestableDelayedTask(
     std::unique_ptr<Task> task,
     double delay_in_seconds) {
-  PostDelayedTask(std::move(task), delay_in_seconds);
+  PostDelayedTask(std::move(task),
+                  delay_in_seconds,
+                  DelayedTask::Nestability::kNonNestable);
 }
 
 PerIsolatePlatformData::~PerIsolatePlatformData() {
@@ -460,7 +471,12 @@ bool PerIsolatePlatformData::FlushForegroundTasksInternal() {
     // Timers may not guarantee queue ordering of events with the same delay if
     // the delay is non-zero. This should not be a problem in practice.
     uv_timer_start(&delayed->timer, RunForegroundTask, delay_millis, 0);
-    uv_unref(reinterpret_cast<uv_handle_t*>(&delayed->timer));
+    // Do not unref non-nestable tasks, which can call into JS.
+    // For the moment all nestable tasks are from GC or logging, so it's
+    // okay to unref the timer for them.
+    if (delayed->nestability == DelayedTask::Nestability::kNestable) {
+      uv_unref(reinterpret_cast<uv_handle_t*>(&delayed->timer));
+    }
     uv_handle_count_++;
 
     scheduled_delayed_tasks_.emplace_back(delayed.release(),
diff --git a/src/node_platform.h b/src/node_platform.h
index 4a05f3bba58c8e..b5595a7500e893 100644
--- a/src/node_platform.h
+++ b/src/node_platform.h
@@ -43,10 +43,12 @@ class TaskQueue {
 };
 
 struct DelayedTask {
+  enum class Nestability { kNestable, kNonNestable };
   std::unique_ptr<v8::Task> task;
   uv_timer_t timer;
   double timeout;
   std::shared_ptr<PerIsolatePlatformData> platform_data;
+  Nestability nestability;
 };
 
 // This acts as the foreground task runner for a given Isolate.
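
For context, `Atomics.waitAsync` is the API these delayed tasks ultimately serve: it returns `{ async, value }`, where `value` (when `async` is `true`) is a promise that settles with `'ok'` if the waiter is notified, or `'timed-out'` once the timeout expires. The timeout is delivered via a delayed task that V8 posts to the platform, which the tests below suggest goes through the non-nestable path. A minimal sketch of the language-level behavior (standard ECMAScript, independent of this patch):

  'use strict';

  const sab = new SharedArrayBuffer(16);
  const i32a = new Int32Array(sab);

  // An async wait returns immediately; `value` is a promise because
  // i32a[0] === 0 matches the expected value, so a real wait begins.
  const { async, value } = Atomics.waitAsync(i32a, 0, 0, 500);
  console.log(async);  // true
  value.then((outcome) => console.log(outcome));  // 'timed-out' after ~500 ms

With the timer unconditionally unref'd, as on the removed line above, nothing kept the event loop alive while such a timeout was pending, so the process could exit before the promise ever settled; the new `Nestability` flag lets the platform keep the timer ref'd for exactly these tasks.
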
@@ -86,6 +88,9 @@ class PerIsolatePlatformData :
   void DeleteFromScheduledTasks(DelayedTask* task);
   void DecreaseHandleCount();
 
+  void PostDelayedTask(std::unique_ptr<v8::Task> task,
+                       double delay_in_seconds,
+                       DelayedTask::Nestability nestability);
   static void FlushTasks(uv_async_t* handle);
   void RunForegroundTask(std::unique_ptr<v8::Task> task);
   static void RunForegroundTask(uv_timer_t* timer);
diff --git a/test/fixtures/atomics-wait-async-timeout.js b/test/fixtures/atomics-wait-async-timeout.js
new file mode 100644
index 00000000000000..fee702fea80c76
--- /dev/null
+++ b/test/fixtures/atomics-wait-async-timeout.js
@@ -0,0 +1,6 @@
+'use strict';
+
+const sab = new SharedArrayBuffer(16);
+const i32a = new Int32Array(sab);
+const result = Atomics.waitAsync(i32a, 0, 0, 1000);
+result.value.then((val) => console.log(val));
diff --git a/test/parallel/test-atomics-wait-async-timeout-main-2.js b/test/parallel/test-atomics-wait-async-timeout-main-2.js
new file mode 100644
index 00000000000000..007c7c54f805f6
--- /dev/null
+++ b/test/parallel/test-atomics-wait-async-timeout-main-2.js
@@ -0,0 +1,24 @@
+'use strict';
+
+require('../common');
+
+if (process.argv[2] === 'child') {
+  const sab = new SharedArrayBuffer(16);
+  const i32a = new Int32Array(sab);
+  const result = Atomics.waitAsync(i32a, 0, 0, 1000 * 1000);
+  result.value.then((val) => console.log(val));
+  return;
+}
+
+const { spawnSync } = require('child_process');
+const assert = require('assert');
+
+const child = spawnSync(process.execPath, [
+  __filename, 'child',
+], { timeout: 1000 });
+
+if (!child.error) {
+  console.log(child.stderr.toString());
+  console.log(child.stdout.toString());
+}
+assert.strictEqual(child.error.code, 'ETIMEDOUT');
diff --git a/test/parallel/test-atomics-wait-async-timeout-main.js b/test/parallel/test-atomics-wait-async-timeout-main.js
new file mode 100644
index 00000000000000..383e3cae46af8c
--- /dev/null
+++ b/test/parallel/test-atomics-wait-async-timeout-main.js
@@ -0,0 +1,19 @@
+'use strict';
+
+require('../common');
+
+if (process.argv[2] === 'child') {
+  const sab = new SharedArrayBuffer(16);
+  const i32a = new Int32Array(sab);
+  const result = Atomics.waitAsync(i32a, 0, 0, 1000);
+  result.value.then((val) => console.log(val));
+  return;
+}
+
+const { spawnSync } = require('child_process');
+const assert = require('assert');
+
+const child = spawnSync(process.execPath, [__filename, 'child']);
+assert.strictEqual(child.stdout.toString().trim(), 'timed-out');
+assert.strictEqual(child.stderr.toString().trim(), '');
+assert.strictEqual(child.status, 0);
diff --git a/test/parallel/test-atomics-wait-async-timeout-worker-2.js b/test/parallel/test-atomics-wait-async-timeout-worker-2.js
new file mode 100644
index 00000000000000..6e15c157441d35
--- /dev/null
+++ b/test/parallel/test-atomics-wait-async-timeout-worker-2.js
@@ -0,0 +1,28 @@
+'use strict';
+
+const common = require('../common');
+const { Worker, workerData } = require('worker_threads');
+const assert = require('assert');
+
+// Do not use isMainThread so that this test itself can be run inside a Worker.
+if (!process.env.HAS_STARTED_WORKER) {
+  process.env.HAS_STARTED_WORKER = 1;
+  const sab = new SharedArrayBuffer(16);
+  const i32a = new Int32Array(sab);
+  const w = new Worker(__filename, {
+    workerData: i32a
+  });
+  const chunks = [];
+  w.stdout.on('data', (chunk) => chunks.push(chunk));
+  w.on('exit', common.mustCall((code) => {
+    assert.strictEqual(Buffer.concat(chunks).toString().trim(), '');
+    assert.strictEqual(code, 1);
+  }));
+  setTimeout(() => {
+    w.terminate();
+  }, 1000);
+} else {
+  const i32a = workerData;
+  const result = Atomics.waitAsync(i32a, 0, 0, 1000 * 1000);
+  result.value.then((val) => console.log(val));
+}
diff --git a/test/parallel/test-atomics-wait-async-timeout-worker.js b/test/parallel/test-atomics-wait-async-timeout-worker.js
new file mode 100644
index 00000000000000..3dc304d2bf7503
--- /dev/null
+++ b/test/parallel/test-atomics-wait-async-timeout-worker.js
@@ -0,0 +1,23 @@
+'use strict';
+
+const common = require('../common');
+const { Worker, workerData } = require('worker_threads');
+const assert = require('assert');
+
+// Do not use isMainThread so that this test itself can be run inside a Worker.
+if (!process.env.HAS_STARTED_WORKER) {
+  process.env.HAS_STARTED_WORKER = 1;
+  const sab = new SharedArrayBuffer(16);
+  const i32a = new Int32Array(sab);
+  const w = new Worker(__filename, { workerData: i32a });
+  const chunks = [];
+  w.stdout.on('data', (chunk) => chunks.push(chunk));
+  w.on('exit', common.mustCall((code) => {
+    assert.strictEqual(Buffer.concat(chunks).toString().trim(), 'timed-out');
+    assert.strictEqual(code, 0);
+  }));
+} else {
+  const i32a = workerData;
+  const result = Atomics.waitAsync(i32a, 0, 0, 1000);
+  result.value.then((val) => console.log(val));
+}
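
The two test flavors probe both sides of the fix. The `-main`/`-worker` tests use a 1-second timeout and require that the process stays alive long enough to print `'timed-out'` and exit cleanly; the `-main-2`/`-worker-2` tests use a huge timeout (1000 * 1000 ms) and require that the now-ref'd timer really does keep the process running, so the parent has to kill it (`ETIMEDOUT` from `spawnSync`, or `worker.terminate()`). For completeness, the remaining outcome, a waiter notified before the timeout, resolves the promise with `'ok'`; a small sketch of that path (hypothetical, not part of this change's test suite):

  'use strict';

  const sab = new SharedArrayBuffer(16);
  const i32a = new Int32Array(sab);

  // Start an async wait with a generous timeout.
  const result = Atomics.waitAsync(i32a, 0, 0, 10 * 1000);
  result.value.then((val) => console.log(val));  // prints 'ok'

  // Wake the single waiter at index 0 well before the timeout fires;
  // the pending promise then resolves with 'ok' instead of 'timed-out'.
  setTimeout(() => {
    Atomics.notify(i32a, 0, 1);
  }, 100);
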