SETTINGS_MAX_HEADER_LIST_SIZE (#206)

This, uh, grew into something far bigger than expected, but it turns out all of it was needed to support this correctly.

- Adds configuration to the client and server builders to set [SETTINGS_MAX_HEADER_LIST_SIZE](http://httpwg.org/specs/rfc7540.html#SETTINGS_MAX_HEADER_LIST_SIZE) (see the builder sketch after this list).
- If not set, a "sane default" of 16 MB is used (taken from golang's http2)
- Header blocks are now decoded as they are received, instead of being buffered up, possibly without bound, until the last CONTINUATION frame is parsed.
- As each field is decoded, its decoded size is added to a running total (see the size-accounting sketch after this list). Whenever a header block goes over the maximum size, the `frame` is marked as oversize.
- Whenever a header block is deemed over the max limit, decoding still continues so the HPACK dynamic table stays in sync, but new fields are no longer appended to the `HeaderMap`. This also saves some wasted hashing.
- To protect against enormous string literals that span multiple CONTINUATION frames, a check ensures the combined encoded bytes stay under the max allowed size. This is technically not exactly what the spec suggests (it counts the decoded size instead), but it should only trigger when someone is actually being malicious. If tripped, a `GOAWAY` with `COMPRESSION_ERROR` is sent and the connection is shut down.
- After an oversize header block frame finishes decoding, the stream's state machine will notice it is oversize and handle that:
  - If the local peer is a server, a 431 (Request Header Fields Too Large) response is sent, as suggested by the spec.
  - A `REFUSED_STREAM` reset is then sent, since we cannot actually hand the stream to the user.
- In order to be able to send both the 431 HEADERS frame and a RST_STREAM frame afterwards, the scheduled `Canceled` machinery was generalized into a `Scheduled(Reason)` state.
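
For reference, the new knob on the builders looks roughly like this. A minimal sketch: `max_header_list_size` matches the method name in the current h2 builder API, but treat the exact signature as illustrative.

```rust
use h2::{client, server};

fn main() {
    // Advertise a 64 KB SETTINGS_MAX_HEADER_LIST_SIZE instead of the
    // 16 MB default taken from golang's http2.
    let mut client_builder = client::Builder::new();
    client_builder.max_header_list_size(64 * 1024);

    // The same knob on the accept side: oversize request header blocks
    // are answered with a 431 and the stream is reset (REFUSED_STREAM).
    let mut server_builder = server::Builder::new();
    server_builder.max_header_list_size(64 * 1024);
}
```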
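On the size accounting itself: RFC 7540 §6.5.2 counts each field's uncompressed name and value octets plus a 32-octet per-field overhead. Below is a minimal sketch of that bookkeeping with hypothetical names; it is not h2's internal code.

```rust
/// Size of one decoded field per RFC 7540, Section 6.5.2:
/// uncompressed name and value lengths plus a 32-octet overhead.
fn field_size(name: &[u8], value: &[u8]) -> usize {
    name.len() + value.len() + 32
}

/// Running total kept while a header block streams in. Once the total
/// crosses the configured max, later fields are still HPACK-decoded
/// (the dynamic table must stay in sync) but are no longer stored.
struct BlockSize {
    total: usize,
    max: usize,
}

impl BlockSize {
    /// Returns false once the block has gone oversize.
    fn account(&mut self, name: &[u8], value: &[u8]) -> bool {
        self.total += field_size(name, value);
        self.total <= self.max
    }
}
```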

Closes #18 
Closes #191
Commit aa23a9735d (parent 6f7b826b0a), authored by Sean McArthur on 2018-01-05 09:23:48 -08:00, committed via GitHub.
26 changed files with 752 additions and 226 deletions.


@@ -1,4 +1,5 @@
 use super::{Buffer, Config, Counts, Prioritized, Recv, Send, Stream, StreamId};
+use super::recv::RecvHeaderBlockError;
 use super::store::{self, Entry, Resolve, Store};
 use {client, proto, server};
 use codec::{Codec, RecvError, SendError, UserError};
@@ -164,7 +165,28 @@ where
         );

         let res = if stream.state.is_recv_headers() {
-            actions.recv.recv_headers(frame, stream, counts)
+            match actions.recv.recv_headers(frame, stream, counts) {
+                Ok(()) => Ok(()),
+                Err(RecvHeaderBlockError::Oversize(resp)) => {
+                    if let Some(resp) = resp {
+                        let _ = actions.send.send_headers(
+                            resp, send_buffer, stream, counts, &mut actions.task);
+                        actions.send.schedule_implicit_reset(
+                            stream,
+                            Reason::REFUSED_STREAM,
+                            &mut actions.task);
+                        actions.recv.enqueue_reset_expiration(stream, counts);
+                        Ok(())
+                    } else {
+                        Err(RecvError::Stream {
+                            id: stream.id,
+                            reason: Reason::REFUSED_STREAM,
+                        })
+                    }
+                },
+                Err(RecvHeaderBlockError::State(err)) => Err(err),
+            }
         } else {
             if !frame.is_end_stream() {
                 // TODO: Is this the right error
@@ -363,22 +385,42 @@ where
         let me = &mut *me;
         let id = frame.stream_id();
         let promised_id = frame.promised_id();

-        let stream = match me.store.find_mut(&id) {
-            Some(stream) => stream.key(),
-            None => return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)),
-        };
-
-        if me.counts.peer().is_server() {
-            // The remote is a client and cannot reserve
-            trace!("recv_push_promise; error remote is client");
-            return Err(RecvError::Connection(Reason::PROTOCOL_ERROR));
-        }
-
-        me.actions.recv.recv_push_promise(frame,
-            &me.actions.send,
-            stream,
-            &mut me.store)
+        let res = {
+            let stream = match me.store.find_mut(&id) {
+                Some(stream) => stream.key(),
+                None => return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)),
+            };
+
+            if me.counts.peer().is_server() {
+                // The remote is a client and cannot reserve
+                trace!("recv_push_promise; error remote is client");
+                return Err(RecvError::Connection(Reason::PROTOCOL_ERROR));
+            }
+
+            me.actions.recv.recv_push_promise(frame,
+                &me.actions.send,
+                stream,
+                &mut me.store)
+        };
+
+        if let Err(err) = res {
+            if let Some(ref mut new_stream) = me.store.find_mut(&promised_id) {
+                let mut send_buffer = self.send_buffer.inner.lock().unwrap();
+                me.actions.reset_on_recv_stream_err(&mut *send_buffer, new_stream, Err(err))
+            } else {
+                // If there was a stream error, the stream should have been stored
+                // so we can track sending a reset.
+                //
+                // Otherwise, this MUST be a connection error.
+                assert!(!err.is_stream_error());
+                Err(err)
+            }
+        } else {
+            res
+        }
     }

     pub fn next_incoming(&mut self) -> Option<StreamRef<B>> {
@@ -925,8 +967,9 @@ fn drop_stream_ref(inner: &Mutex<Inner>, key: store::Key) {
 fn maybe_cancel(stream: &mut store::Ptr, actions: &mut Actions, counts: &mut Counts) {
     if stream.is_canceled_interest() {
-        actions.send.schedule_cancel(
+        actions.send.schedule_implicit_reset(
             stream,
+            Reason::CANCEL,
             &mut actions.task);
         actions.recv.enqueue_reset_expiration(stream, counts);
     }