worker: prevent event loop starvation through MessagePorts

Limit the number of messages processed without interruption on a
given `MessagePort` to prevent event loop starvation, but still
make sure that all messages are emitted that were already in the
queue when emitting began.

This aligns the behaviour better with the web.

Refs: https://github.com/nodejs/node/pull/28030

PR-URL: https://github.com/nodejs/node/pull/29315
Reviewed-By: Gus Caplan <me@gus.host>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: James M Snell <jasnell@gmail.com>
Reviewed-By: Jeremiah Senkpiel <fishrock123@rocketmail.com>
This commit is contained in:
Anna Henningsen 2019-06-02 17:23:50 +02:00
parent 821799024e
commit b34f05ecf2
No known key found for this signature in database
GPG Key ID: 9C63F3A6CD2AD8F9
3 changed files with 63 additions and 0 deletions

View File

@@ -604,11 +604,30 @@ void MessagePort::OnMessage() {
HandleScope handle_scope(env()->isolate());
Local<Context> context = object(env()->isolate())->CreationContext();
// Upper bound on messages processed in this OnMessage() call; computed once
// up front so messages posted during processing don't extend the batch.
size_t processing_limit;
{
// FIXME(review): `Mutex::ScopedLock(data_->mutex_);` constructs an unnamed
// temporary that is destroyed at the end of this full expression, so the
// mutex is released immediately and `incoming_messages_.size()` below is
// read WITHOUT the lock held. This should be:
//   Mutex::ScopedLock lock(data_->mutex_);
Mutex::ScopedLock(data_->mutex_);
processing_limit = std::max(data_->incoming_messages_.size(),
static_cast<size_t>(1000));
}
// data_ can only ever be modified by the owner thread, so no need to lock.
// However, the message port may be transferred while it is processing
// messages, so we need to check that this handle still owns its `data_` field
// on every iteration.
while (data_) {
if (processing_limit-- == 0) {
// Prevent event loop starvation by only processing those messages without
// interruption that were already present when the OnMessage() call was
// first triggered, but at least 1000 messages because otherwise the
// overhead of repeatedly triggering the uv_async_t instance becomes
// noticeable, at least on Windows.
// (That might require more investigation by somebody more familiar with
// Windows.)
// Re-arm the async handle so the remaining queued messages are handled on
// a later event-loop iteration, then yield control back to the loop.
TriggerAsync();
return;
}
HandleScope handle_scope(env()->isolate());
Context::Scope context_scope(context);

View File

@@ -0,0 +1,15 @@
'use strict';
const common = require('../common');
const { MessageChannel } = require('worker_threads');

// Closing a message port from within its own 'message' handler must not
// drop messages that were already sitting in the queue when emitting began:
// both messages posted below have to be delivered, so the handler is
// expected to run exactly twice.
const channel = new MessageChannel();
channel.port1.on('message', common.mustCall(() => {
  channel.port1.close();
}, 2));
for (const payload of ['foo', 'bar'])
  channel.port2.postMessage(payload);

View File

@@ -0,0 +1,29 @@
'use strict';
const common = require('../common');
const assert = require('assert');
const { MessageChannel } = require('worker_threads');

// Verify that an endless asynchronous .on('message')/postMessage ping-pong
// neither overflows the stack nor starves the event loop. Timers are
// scheduled both before the .on('message') handler is attached and from
// inside of it; both must fire.
const { port1, port2 } = new MessageChannel();
let iterations = 0;
port1.on('message', function onMessage() {
  if (iterations === 0)
    setTimeout(common.mustCall(() => port1.close()), 0);
  port2.postMessage(0);
  iterations++;
  assert(iterations - 1 < 10000, `hit ${iterations} loop iterations`);
});
port2.postMessage(0);
// The event loop must stay responsive despite the recursive .postMessage()
// calls above — this timer actually firing is part of the test.
setTimeout(common.mustCall(), 0);