-
-
Notifications
You must be signed in to change notification settings - Fork 757
feat: add configurable maxPayloadSize for WebSocket #4955
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 7 commits
c1c9c0b
aba708d
b8b6b2c
89b2df0
c0e871e
549cc0b
90942cd
08da222
22fe151
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -39,18 +39,23 @@ class ByteParser extends Writable { | |
| /** @type {import('./websocket').Handler} */ | ||
| #handler | ||
|
|
||
| /** @type {number} */ | ||
| #maxPayloadSize | ||
|
|
||
| /** | ||
| * @param {import('./websocket').Handler} handler | ||
| * @param {Map<string, string>|null} extensions | ||
| * @param {{ maxPayloadSize?: number }} [options] | ||
| */ | ||
| constructor (handler, extensions) { | ||
| constructor (handler, extensions, options = {}) { | ||
| super() | ||
|
|
||
| this.#handler = handler | ||
| this.#extensions = extensions == null ? new Map() : extensions | ||
| this.#maxPayloadSize = options.maxPayloadSize ?? 0 | ||
|
|
||
| if (this.#extensions.has('permessage-deflate')) { | ||
| this.#extensions.set('permessage-deflate', new PerMessageDeflate(extensions)) | ||
| this.#extensions.set('permessage-deflate', new PerMessageDeflate(extensions, options)) | ||
| } | ||
| } | ||
|
|
||
|
|
@@ -66,6 +71,20 @@ class ByteParser extends Writable { | |
| this.run(callback) | ||
| } | ||
|
|
||
| #validatePayloadLength () { | ||
| if ( | ||
| this.#maxPayloadSize > 0 && | ||
| !isControlFrame(this.#info.opcode) && | ||
| !this.#info.compressed && | ||
| this.#fragmentsBytes + this.#info.payloadLength > this.#maxPayloadSize | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
No. The idea is that this check interprets "already buffered fragments + current frame". |
||
| ) { | ||
| failWebsocketConnection(this.#handler, 1009, 'Payload size exceeds maximum allowed size') | ||
| return false | ||
| } | ||
|
|
||
| return true | ||
| } | ||
|
|
||
| /** | ||
| * Runs whenever a new chunk is received. | ||
| * Callback is called whenever there are no more chunks buffering, | ||
|
|
@@ -169,6 +188,10 @@ class ByteParser extends Writable { | |
| this.#info.masked = masked | ||
| this.#info.fin = fin | ||
| this.#info.fragmented = fragmented | ||
|
|
||
| if (this.#state === parserStates.READ_DATA && !this.#validatePayloadLength()) { | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. There's only one branch where state can be set to READ_DATA from here; it should be moved. |
||
| return | ||
| } | ||
| } else if (this.#state === parserStates.PAYLOADLENGTH_16) { | ||
| if (this.#byteOffset < 2) { | ||
| return callback() | ||
|
|
@@ -178,6 +201,10 @@ class ByteParser extends Writable { | |
|
|
||
| this.#info.payloadLength = buffer.readUInt16BE(0) | ||
| this.#state = parserStates.READ_DATA | ||
|
|
||
| if (!this.#validatePayloadLength()) { | ||
| return | ||
| } | ||
| } else if (this.#state === parserStates.PAYLOADLENGTH_64) { | ||
| if (this.#byteOffset < 8) { | ||
| return callback() | ||
|
|
@@ -200,6 +227,10 @@ class ByteParser extends Writable { | |
|
|
||
| this.#info.payloadLength = lower | ||
| this.#state = parserStates.READ_DATA | ||
|
|
||
| if (!this.#validatePayloadLength()) { | ||
| return | ||
| } | ||
| } else if (this.#state === parserStates.READ_DATA) { | ||
| if (this.#byteOffset < this.#info.payloadLength) { | ||
| return callback() | ||
|
|
@@ -224,29 +255,34 @@ class ByteParser extends Writable { | |
|
|
||
| this.#state = parserStates.INFO | ||
| } else { | ||
| this.#extensions.get('permessage-deflate').decompress(body, this.#info.fin, (error, data) => { | ||
| if (error) { | ||
| // Use 1009 (Message Too Big) for decompression size limit errors | ||
| const code = error instanceof MessageSizeExceededError ? 1009 : 1007 | ||
| failWebsocketConnection(this.#handler, code, error.message) | ||
| return | ||
| } | ||
|
|
||
| this.writeFragments(data) | ||
| this.#extensions.get('permessage-deflate').decompress( | ||
| body, | ||
| this.#info.fin, | ||
| (error, data) => { | ||
| if (error) { | ||
| // Use 1009 (Message Too Big) for decompression size limit errors | ||
| const code = error instanceof MessageSizeExceededError ? 1009 : 1007 | ||
| failWebsocketConnection(this.#handler, code, error.message) | ||
| return | ||
| } | ||
|
|
||
| this.writeFragments(data) | ||
|
|
||
| if (!this.#info.fin) { | ||
| this.#state = parserStates.INFO | ||
| this.#loop = true | ||
| this.run(callback) | ||
| return | ||
| } | ||
|
|
||
| websocketMessageReceived(this.#handler, this.#info.binaryType, this.consumeFragments()) | ||
|
|
||
| if (!this.#info.fin) { | ||
| this.#state = parserStates.INFO | ||
| this.#loop = true | ||
| this.#state = parserStates.INFO | ||
| this.run(callback) | ||
| return | ||
| } | ||
|
|
||
| websocketMessageReceived(this.#handler, this.#info.binaryType, this.consumeFragments()) | ||
|
|
||
| this.#loop = true | ||
| this.#state = parserStates.INFO | ||
| this.run(callback) | ||
| }) | ||
| }, | ||
| this.#fragmentsBytes | ||
|
KhafraDev marked this conversation as resolved.
|
||
| ) | ||
|
|
||
| this.#loop = false | ||
| break | ||
|
|
||

There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I don't understand why we ignore compressed messages here, but then perform the same logic in the permessage-deflate handler?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
we are doing this kind of check in 3 places:
The 2nd could be optional, but it saves CPU cycles.
Uh oh!
There was an error while loading. Please reload this page.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
but we're skipping compressed packets here, it's not much of an early return if you still have to go all the way to the decompression handler
what I'm asking for is why we're skipping compressed packets here
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
We are skipping compressed packets here because the logic is different, as @tsctx requested.
Uh oh!
There was an error while loading. Please reload this page.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The logic is not different.
Assuming we send an uncompressed and compressed frame that's 129MB:
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I understand that, but why skip them? Couldn't an attacker just send a compressed frame that has an unlimited size, where undici will collect all fragments, attempt to inflate, then fail much later?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The check is done chunk by chunk during decompression; as soon as it goes over the limit, the connection is interrupted.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I’ve looked into it, and while we check the max length per message unit, we seem to be missing a check for the total buffered size of fragmented messages.
We don't actually need a separate check for compression; we should simply trigger an early exit if the accumulated size plus the current payload exceeds the limit. Even with compressed data, we can perform this early check if the payload itself is already too large.
This is already handled for uncompressed fragments, but for the compressed path, we should also pass the current buffered size to the decompression side to ensure the same validation.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I've written some pseudo-code to illustrate the issue more clearly.
Uh oh!
There was an error while loading. Please reload this page.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Thank you @tsctx, this is what I was trying to articulate, lol.
The issue is if the compressed data is larger than the maxPayloadSize, we shouldn't bother going to READ_DATA and inflating the data which is already larger than the maxPayloadSize.
The permessage-deflate handler catches it once the decompressed data is larger than the maxPayloadSize. This only happens once we have a full frame, so an attacker could theoretically send n fragmented frames, each of unlimited size, that are kept in memory until we receive the entirety of the message.
From READ_DATA, should be helpful in understanding: