SETTINGS_MAX_HEADER_LIST_SIZE (#206)
This, uh, grew into something far bigger than expected, but it turns out all of it was needed to eventually support this correctly.

- Adds configuration to client and server to set [SETTINGS_MAX_HEADER_LIST_SIZE](http://httpwg.org/specs/rfc7540.html#SETTINGS_MAX_HEADER_LIST_SIZE) (a configuration sketch follows below).
- If not set, a "sane default" of 16 MB is used (taken from golang's http2).
- Decoding header blocks now happens as they are received, instead of buffering, possibly forever, until the last continuation frame is parsed.
- As each field is decoded, its undecoded size is added to the total. Whenever a header block goes over the maximum size, the `frame` is marked as such.
- Whenever a header block is deemed over the max limit, decoding still continues, but new fields are not appended to the `HeaderMap`. This also saves wasted hashing.
- To protect against enormous string literals that span multiple continuation frames, a check is made that the combined encoded bytes are less than the max allowed size. While not exactly what the spec suggests (which counts the decoded size instead), this should only trigger when someone is actually being malicious. If found, a `GOAWAY` with `COMPRESSION_ERROR` is sent, and the connection is shut down.
- After an oversize header block frame finishes decoding, the stream's state machine notices it is oversize and handles it:
  - If the local peer is a server, a 431 response is sent, as suggested by the spec.
  - A `REFUSED_STREAM` reset is sent, since we cannot actually give the stream to the user.
- In order to send both the 431 HEADERS frame and a reset frame afterwards, the scheduled `Canceled` machinery was generalized into a `Scheduled(Reason)` state.

Closes #18
Closes #191
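For context, here is a minimal sketch of how the new setting might be configured. It assumes the client and server builders expose a `max_header_list_size` setter and a `Builder::new()` constructor; those names are illustrative of what this PR adds and may not match the final API exactly.

```rust
// A minimal sketch, assuming `max_header_list_size` is the builder method
// added by this PR (name and signature are assumptions, not confirmed API).
extern crate h2;

fn configure() {
    // Client side: advertise a 16 KB limit instead of the 16 MB default.
    let mut client = h2::client::Builder::new();
    client.max_header_list_size(16 * 1024);

    // Server side: the same setting, carried in the server's SETTINGS frame.
    let mut server = h2::server::Builder::new();
    server.max_header_list_size(16 * 1024);

    // A subsequent `handshake(io)` on either builder would then advertise
    // the configured SETTINGS_MAX_HEADER_LIST_SIZE to the peer.
}
```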
@@ -659,10 +659,12 @@ impl Prioritize {
                            )
                        ),
                        None => {
                            assert!(stream.state.is_canceled());
                            stream.state.set_reset(Reason::CANCEL);
                            let reason = stream.state.get_scheduled_reset()
                                .expect("must be scheduled to reset");

                            let frame = frame::Reset::new(stream.id, Reason::CANCEL);
                            stream.state.set_reset(reason);

                            let frame = frame::Reset::new(stream.id, reason);
                            Frame::Reset(frame)
                        }
                    };
@@ -674,7 +676,7 @@ impl Prioritize {
                        self.last_opened_id = stream.id;
                    }

                    if !stream.pending_send.is_empty() || stream.state.is_canceled() {
                    if !stream.pending_send.is_empty() || stream.state.is_scheduled_reset() {
                        // TODO: Only requeue the sender IF it is ready to send
                        // the next frame. i.e. don't requeue it if the next
                        // frame is a data frame and the stream does not have
@@ -1,4 +1,5 @@
use super::*;
use super::store::Resolve;
use {frame, proto};
use codec::{RecvError, UserError};
use frame::{Reason, DEFAULT_INITIAL_WINDOW_SIZE};
@@ -54,6 +55,12 @@ pub(super) enum Event {
    Trailers(HeaderMap),
}

#[derive(Debug)]
pub(super) enum RecvHeaderBlockError<T> {
    Oversize(T),
    State(RecvError),
}

#[derive(Debug, Clone, Copy)]
struct Indices {
    head: store::Key,
@@ -133,7 +140,7 @@ impl Recv {
        frame: frame::Headers,
        stream: &mut store::Ptr,
        counts: &mut Counts,
    ) -> Result<(), RecvError> {
    ) -> Result<(), RecvHeaderBlockError<Option<frame::Headers>>> {
        trace!("opening stream; init_window={}", self.init_window_sz);
        let is_initial = stream.state.recv_open(frame.is_end_stream())?;

@@ -158,7 +165,7 @@ impl Recv {
                        return Err(RecvError::Stream {
                            id: stream.id,
                            reason: Reason::PROTOCOL_ERROR,
                        })
                        }.into())
                    },
                };

@@ -166,6 +173,32 @@ impl Recv {
            }
        }

        if frame.is_over_size() {
            // A frame is over size if the decoded header block was bigger than
            // SETTINGS_MAX_HEADER_LIST_SIZE.
            //
            // > A server that receives a larger header block than it is willing
            // > to handle can send an HTTP 431 (Request Header Fields Too
            // > Large) status code [RFC6585]. A client can discard responses
            // > that it cannot process.
            //
            // So, if peer is a server, we'll send a 431. In either case,
            // an error is recorded, which will send a REFUSED_STREAM,
            // since we don't want any of the data frames either.
            trace!("recv_headers; frame for {:?} is over size", stream.id);
            return if counts.peer().is_server() && is_initial {
                let mut res = frame::Headers::new(
                    stream.id,
                    frame::Pseudo::response(::http::StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE),
                    HeaderMap::new()
                );
                res.set_end_stream();
                Err(RecvHeaderBlockError::Oversize(Some(res)))
            } else {
                Err(RecvHeaderBlockError::Oversize(None))
            };
        }

        let message = counts.peer().convert_poll_message(frame)?;

        // Push the frame onto the stream's recv buffer
@@ -517,15 +550,20 @@ impl Recv {
        );

        new_stream.state.reserve_remote()?;
        // Store the stream
        let new_stream = store.insert(frame.promised_id(), new_stream).key();


        if frame.is_over_size() {
            trace!("recv_push_promise; frame for {:?} is over size", frame.promised_id());
            return Err(RecvError::Stream {
                id: frame.promised_id(),
                reason: Reason::REFUSED_STREAM,
            });
        }

        let mut ppp = store[stream].pending_push_promises.take();

        {
            // Store the stream
            let mut new_stream = store.insert(frame.promised_id(), new_stream);

            ppp.push(&mut new_stream);
        }
        ppp.push(&mut store.resolve(new_stream));

        let stream = &mut store[stream];

@@ -609,9 +647,7 @@ impl Recv {
        stream: &mut store::Ptr,
        counts: &mut Counts,
    ) {
        assert!(stream.state.is_local_reset());

        if stream.is_pending_reset_expiration() {
        if !stream.state.is_local_reset() || stream.is_pending_reset_expiration() {
            return;
        }

@@ -842,6 +878,14 @@ impl Event {
    }
}

// ===== impl RecvHeaderBlockError =====

impl<T> From<RecvError> for RecvHeaderBlockError<T> {
    fn from(err: RecvError) -> Self {
        RecvHeaderBlockError::State(err)
    }
}

// ===== util =====

fn parse_u64(src: &[u8]) -> Result<u64, ()> {
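The `is_over_size()` check above relies on the header-list size accounting defined in RFC 7540, Section 6.5.2: each field counts as the length of its decoded name plus the length of its decoded value plus 32 octets of overhead. Below is a minimal sketch of that accounting; the `HeaderListBudget`, `try_admit`, and `field_size` names are illustrative and are not the crate's actual decoder types.

```rust
/// Per RFC 7540, Section 6.5.2, each header field counts as the length of
/// its name plus the length of its value plus 32 octets of overhead,
/// measured on the decoded (uncompressed) representation.
fn field_size(name: &[u8], value: &[u8]) -> usize {
    name.len() + value.len() + 32
}

struct HeaderListBudget {
    max: usize,
    used: usize,
    over: bool,
}

impl HeaderListBudget {
    fn new(max: usize) -> Self {
        HeaderListBudget { max, used: 0, over: false }
    }

    /// Returns true if the field still fits. Once the budget is blown,
    /// decoding continues (to keep HPACK state consistent) but the field
    /// is not stored, mirroring the behavior described in the commit message.
    fn try_admit(&mut self, name: &[u8], value: &[u8]) -> bool {
        self.used = self.used.saturating_add(field_size(name, value));
        if self.used > self.max {
            self.over = true;
        }
        !self.over
    }
}
```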
@@ -132,6 +132,9 @@ impl Send {
            return;
        }

        // Transition the state to reset no matter what.
        stream.state.set_reset(reason);

        // If closed AND the send queue is flushed, then the stream cannot be
        // reset explicitly, either. Implicit resets can still be queued.
        if is_closed && is_empty {
@@ -143,9 +146,6 @@ impl Send {
            return;
        }

        // Transition the state
        stream.state.set_reset(reason);

        self.recv_err(buffer, stream);

        let frame = frame::Reset::new(stream.id, reason);
@@ -154,14 +154,18 @@ impl Send {
        self.prioritize.queue_frame(frame.into(), buffer, stream, task);
    }

    pub fn schedule_cancel(&mut self, stream: &mut store::Ptr, task: &mut Option<Task>) {
        trace!("schedule_cancel; {:?}", stream.id);
    pub fn schedule_implicit_reset(
        &mut self,
        stream: &mut store::Ptr,
        reason: Reason,
        task: &mut Option<Task>,
    ) {
        if stream.state.is_closed() {
            // Stream is already closed, nothing more to do
            return;
        }

        stream.state.set_canceled();
        stream.state.set_scheduled_reset(reason);

        self.prioritize.reclaim_reserved_capacity(stream);
        self.prioritize.schedule_send(stream, task);
@@ -76,10 +76,14 @@ enum Cause {
    LocallyReset(Reason),
    Io,

    /// The user droped all handles to the stream without explicitly canceling.
    /// This indicates to the connection that a reset frame must be sent out
    /// once the send queue has been flushed.
    Canceled,
    ///
    /// Examples of when this could happen:
    /// - User drops all references to a stream, so we want to CANCEL the it.
    /// - Header block size was too large, so we want to REFUSE, possibly
    ///   after sending a 431 response frame.
    Scheduled(Reason),
}

impl State {
@@ -269,15 +273,22 @@ impl State {
        self.inner = Closed(Cause::LocallyReset(reason));
    }

    /// Set the stream state to canceled
    pub fn set_canceled(&mut self) {
    /// Set the stream state to a scheduled reset.
    pub fn set_scheduled_reset(&mut self, reason: Reason) {
        debug_assert!(!self.is_closed());
        self.inner = Closed(Cause::Canceled);
        self.inner = Closed(Cause::Scheduled(reason));
    }

    pub fn is_canceled(&self) -> bool {
    pub fn get_scheduled_reset(&self) -> Option<Reason> {
        match self.inner {
            Closed(Cause::Canceled) => true,
            Closed(Cause::Scheduled(reason)) => Some(reason),
            _ => None,
        }
    }

    pub fn is_scheduled_reset(&self) -> bool {
        match self.inner {
            Closed(Cause::Scheduled(..)) => true,
            _ => false,
        }
    }
@@ -285,7 +296,7 @@ impl State {
    pub fn is_local_reset(&self) -> bool {
        match self.inner {
            Closed(Cause::LocallyReset(_)) => true,
            Closed(Cause::Canceled) => true,
            Closed(Cause::Scheduled(..)) => true,
            _ => false,
        }
    }
@@ -381,8 +392,8 @@ impl State {
        // TODO: Is this correct?
        match self.inner {
            Closed(Cause::Proto(reason)) |
            Closed(Cause::LocallyReset(reason)) => Err(proto::Error::Proto(reason)),
            Closed(Cause::Canceled) => Err(proto::Error::Proto(Reason::CANCEL)),
            Closed(Cause::LocallyReset(reason)) |
            Closed(Cause::Scheduled(reason)) => Err(proto::Error::Proto(reason)),
            Closed(Cause::Io) => Err(proto::Error::Io(io::ErrorKind::BrokenPipe.into())),
            Closed(Cause::EndStream) |
            HalfClosedRemote(..) => Ok(false),
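The `Scheduled(Reason)` cause above generalizes the old `Canceled` state: a reset is decided on first, and the actual RST_STREAM frame is only emitted once anything queued ahead of it (such as the 431 HEADERS frame) has been flushed. A simplified model of that lifecycle follows; the enum and functions are stand-ins for illustration, not the crate's actual `State` or prioritizer types.

```rust
// A simplified model of the scheduled-reset lifecycle; all names here are
// illustrative stand-ins, not h2's real types.

#[derive(Debug, Clone, Copy, PartialEq)]
enum Reason {
    Cancel,
    RefusedStream,
}

#[derive(Debug)]
enum StreamState {
    Open,
    // A reset has been decided, but the RST_STREAM frame has not been
    // written yet (e.g. a 431 HEADERS frame is still queued ahead of it).
    ScheduledReset(Reason),
    // The RST_STREAM frame has been queued/sent with this reason.
    Reset(Reason),
}

impl StreamState {
    fn schedule_reset(&mut self, reason: Reason) {
        if let StreamState::Open = *self {
            *self = StreamState::ScheduledReset(reason);
        }
    }

    fn scheduled_reason(&self) -> Option<Reason> {
        match *self {
            StreamState::ScheduledReset(reason) => Some(reason),
            _ => None,
        }
    }
}

fn flush(state: &mut StreamState) -> Option<Reason> {
    // Once earlier frames have been flushed, the prioritizer pops the
    // scheduled reason and emits the RST_STREAM with that reason.
    let reason = state.scheduled_reason()?;
    *state = StreamState::Reset(reason);
    Some(reason)
}

fn main() {
    let mut state = StreamState::Open;
    state.schedule_reset(Reason::RefusedStream);
    assert_eq!(flush(&mut state), Some(Reason::RefusedStream));
}
```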
@@ -1,4 +1,5 @@
use super::{Buffer, Config, Counts, Prioritized, Recv, Send, Stream, StreamId};
use super::recv::RecvHeaderBlockError;
use super::store::{self, Entry, Resolve, Store};
use {client, proto, server};
use codec::{Codec, RecvError, SendError, UserError};
@@ -164,7 +165,28 @@ where
            );

            let res = if stream.state.is_recv_headers() {
                actions.recv.recv_headers(frame, stream, counts)
                match actions.recv.recv_headers(frame, stream, counts) {
                    Ok(()) => Ok(()),
                    Err(RecvHeaderBlockError::Oversize(resp)) => {
                        if let Some(resp) = resp {
                            let _ = actions.send.send_headers(
                                resp, send_buffer, stream, counts, &mut actions.task);

                            actions.send.schedule_implicit_reset(
                                stream,
                                Reason::REFUSED_STREAM,
                                &mut actions.task);
                            actions.recv.enqueue_reset_expiration(stream, counts);
                            Ok(())
                        } else {
                            Err(RecvError::Stream {
                                id: stream.id,
                                reason: Reason::REFUSED_STREAM,
                            })
                        }
                    },
                    Err(RecvHeaderBlockError::State(err)) => Err(err),
                }
            } else {
                if !frame.is_end_stream() {
                    // TODO: Is this the right error
@@ -363,22 +385,42 @@ where
        let me = &mut *me;

        let id = frame.stream_id();
        let promised_id = frame.promised_id();

        let stream = match me.store.find_mut(&id) {
            Some(stream) => stream.key(),
            None => return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)),
        let res = {
            let stream = match me.store.find_mut(&id) {
                Some(stream) => stream.key(),
                None => return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)),
            };

            if me.counts.peer().is_server() {
                // The remote is a client and cannot reserve
                trace!("recv_push_promise; error remote is client");
                return Err(RecvError::Connection(Reason::PROTOCOL_ERROR));
            }

            me.actions.recv.recv_push_promise(frame,
                                              &me.actions.send,
                                              stream,
                                              &mut me.store)
        };

        if me.counts.peer().is_server() {
            // The remote is a client and cannot reserve
            trace!("recv_push_promise; error remote is client");
            return Err(RecvError::Connection(Reason::PROTOCOL_ERROR));
        }
        if let Err(err) = res {
            if let Some(ref mut new_stream) = me.store.find_mut(&promised_id) {

        me.actions.recv.recv_push_promise(frame,
                                          &me.actions.send,
                                          stream,
                                          &mut me.store)
                let mut send_buffer = self.send_buffer.inner.lock().unwrap();
                me.actions.reset_on_recv_stream_err(&mut *send_buffer, new_stream, Err(err))
            } else {
                // If there was a stream error, the stream should have been stored
                // so we can track sending a reset.
                //
                // Otherwise, this MUST be an connection error.
                assert!(!err.is_stream_error());
                Err(err)
            }
        } else {
            res
        }
    }

    pub fn next_incoming(&mut self) -> Option<StreamRef<B>> {
@@ -925,8 +967,9 @@ fn drop_stream_ref(inner: &Mutex<Inner>, key: store::Key) {

fn maybe_cancel(stream: &mut store::Ptr, actions: &mut Actions, counts: &mut Counts) {
    if stream.is_canceled_interest() {
        actions.send.schedule_cancel(
        actions.send.schedule_implicit_reset(
            stream,
            Reason::CANCEL,
            &mut actions.task);
        actions.recv.enqueue_reset_expiration(stream, counts);
    }