Add client support for server push (#314)
This patch exposes push promises to the client API. Closes #252
Committed by: Carl Lerche
Parent: 6d8554a23c
Commit: 6b23542a55
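For orientation, here is a minimal sketch of how a caller might drive the push API this patch introduces, using the futures 0.1 Poll/Async types that appear throughout the diff. The trait and helper below are invented stand-ins that mirror the shape of the poll_pushed methods added in this commit; they are not part of the crate.

// A minimal sketch, assuming futures 0.1 and the `http` crate. `PollPushed`
// and `drain_available` are hypothetical; they only mirror the shape of the
// `poll_pushed` methods added in this patch.
extern crate futures;
extern crate http;

use futures::{Async, Poll};
use http::Request;

trait PollPushed {
    type Error;
    // Yields the next pushed request, `Ready(None)` once the peer is done,
    // or `NotReady` if nothing has been promised yet.
    fn poll_pushed(&mut self) -> Poll<Option<Request<()>>, Self::Error>;
}

// Collect whatever pushed requests are ready right now without blocking.
fn drain_available<P: PollPushed>(src: &mut P) -> Result<Vec<Request<()>>, P::Error> {
    let mut out = Vec::new();
    loop {
        match src.poll_pushed()? {
            Async::Ready(Some(req)) => out.push(req),
            Async::Ready(None) => return Ok(out), // stream closed; no more pushes
            Async::NotReady => return Ok(out),    // task will be notified and re-polled
        }
    }
}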
@@ -28,7 +28,6 @@ use frame::{StreamId, StreamIdOverflow};
use proto::*;

use bytes::Bytes;
use http::{Request, Response};
use std::time::Duration;

#[derive(Debug)]
@@ -3,7 +3,7 @@ use {frame, proto};
use codec::{RecvError, UserError};
use frame::{Reason, DEFAULT_INITIAL_WINDOW_SIZE};

use http::HeaderMap;
use http::{HeaderMap, Response, Request, Method};

use std::io;
use std::time::{Duration, Instant};
@@ -216,7 +216,9 @@ impl Recv {
            };
        }

        let message = counts.peer().convert_poll_message(frame)?;
        let stream_id = frame.stream_id();
        let (pseudo, fields) = frame.into_parts();
        let message = counts.peer().convert_poll_message(pseudo, fields, stream_id)?;

        // Push the frame onto the stream's recv buffer
        stream
@@ -247,6 +249,37 @@ impl Recv {
        }
    }

    /// Called by the client to get pushed response
    pub fn poll_pushed(
        &mut self, stream: &mut store::Ptr
    ) -> Poll<Option<(Request<()>, store::Key)>, proto::Error> {
        use super::peer::PollMessage::*;

        let mut ppp = stream.pending_push_promises.take();
        let pushed = ppp.pop(stream.store_mut()).map(
            |mut pushed| match pushed.pending_recv.pop_front(&mut self.buffer) {
                Some(Event::Headers(Server(headers))) =>
                    Async::Ready(Some((headers, pushed.key()))),
                // When frames are pushed into the queue, it is verified that
                // the first frame is a HEADERS frame.
                _ => panic!("Headers not set on pushed stream")
            }
        );
        stream.pending_push_promises = ppp;
        if let Some(p) = pushed {
            Ok(p)
        } else {
            let is_open = stream.state.ensure_recv_open()?;

            if is_open {
                stream.recv_task = Some(task::current());
                Ok(Async::NotReady)
            } else {
                Ok(Async::Ready(None))
            }
        }
    }

    /// Called by the client to get the response
    pub fn poll_response(
        &mut self,
@@ -538,13 +571,7 @@ impl Recv {
        frame: frame::PushPromise,
        stream: &mut store::Ptr,
    ) -> Result<(), RecvError> {

        // TODO: Streams in the reserved states do not count towards the concurrency
        // limit. However, it seems like there should be a cap otherwise this
        // could grow in memory indefinitely.

        stream.state.reserve_remote()?;

        if frame.is_over_size() {
            // A frame is over size if the decoded header block was bigger than
            // SETTINGS_MAX_HEADER_LIST_SIZE.
@@ -564,9 +591,46 @@ impl Recv {
            });
        }

        let promised_id = frame.promised_id();
        use http::header;
        let (pseudo, fields) = frame.into_parts();
        let req = ::server::Peer::convert_poll_message(pseudo, fields, promised_id)?;
        // The spec has some requirements for promised request headers
        // [https://httpwg.org/specs/rfc7540.html#PushRequests]

        // A promised request "that indicates the presence of a request body
        // MUST reset the promised stream with a stream error"
        if let Some(content_length) = req.headers().get(header::CONTENT_LENGTH) {
            match parse_u64(content_length.as_bytes()) {
                Ok(0) => {},
                _ => {
                    return Err(RecvError::Stream {
                        id: promised_id,
                        reason: Reason::PROTOCOL_ERROR,
                    });
                },
            }
        }
        // "The server MUST include a method in the :method pseudo-header field
        // that is safe and cacheable"
        if !Self::safe_and_cacheable(req.method()) {
            return Err(RecvError::Stream {
                id: promised_id,
                reason: Reason::PROTOCOL_ERROR,
            });
        }
        use super::peer::PollMessage::*;
        stream.pending_recv.push_back(&mut self.buffer, Event::Headers(Server(req)));
        stream.notify_recv();
        Ok(())
    }

    fn safe_and_cacheable(method: &Method) -> bool {
        // Cacheable: https://httpwg.org/specs/rfc7231.html#cacheable.methods
        // Safe: https://httpwg.org/specs/rfc7231.html#safe.methods
        return method == Method::GET || method == Method::HEAD;
    }

    /// Ensures that `id` is not in the `Idle` state.
    pub fn ensure_not_idle(&self, id: StreamId) -> Result<(), Reason> {
        if let Ok(next) = self.next_stream_id {
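The two checks above implement the RFC 7540 Section 8.2 requirements on promised requests: the request must not indicate a body, and its method must be safe and cacheable. Restated as a self-contained sketch; the function name and error strings below are illustrative, not part of the patch.

extern crate http;

use http::{header, Method, Request};

// A promised request must be cacheable and safe and must not indicate a body.
fn validate_promised_request(req: &Request<()>) -> Result<(), &'static str> {
    if let Some(len) = req.headers().get(header::CONTENT_LENGTH) {
        // Any content-length other than an explicit 0 implies a request body.
        let is_zero = len.to_str().ok().and_then(|v| v.parse::<u64>().ok()) == Some(0);
        if !is_zero {
            return Err("promised request indicates a request body");
        }
    }
    if req.method() != Method::GET && req.method() != Method::HEAD {
        return Err("promised :method is not safe and cacheable");
    }
    Ok(())
}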
@@ -153,10 +153,7 @@ impl State {
                if eos {
                    Closed(Cause::EndStream)
                } else {
                    Open {
                        local: AwaitingHeaders,
                        remote,
                    }
                    HalfClosedLocal(Streaming)
                }
            },
            Open {
@@ -245,7 +245,6 @@ where
                if let Err(RecvError::Stream { .. }) = res {
                    actions.recv.release_connection_capacity(sz as WindowSize, &mut None);
                }

                actions.reset_on_recv_stream_err(send_buffer, stream, counts, res)
            })
        }
@@ -426,6 +425,10 @@ where
            None => return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)),
        };

        // TODO: Streams in the reserved states do not count towards the concurrency
        // limit. However, it seems like there should be a cap otherwise this
        // could grow in memory indefinitely.

        // Ensure that we can reserve streams
        me.actions.recv.ensure_can_reserve()?;
@@ -437,8 +440,9 @@ where
            return Ok(());
        }

        // Create a scope
        let child_key = {
        // Try to handle the frame and create a corresponding key for the pushed stream
        // this requires a bit of indirection to make the borrow checker happy.
        let child_key: Option<store::Key> = {
            // Create state for the stream
            let stream = me.store.insert(promised_id, {
                Stream::new(
@@ -450,23 +454,29 @@ where
            let actions = &mut me.actions;

            me.counts.transition(stream, |counts, stream| {
                let res = actions.recv.recv_push_promise(frame, stream);
                let stream_valid =
                    actions.recv.recv_push_promise(frame, stream);

                let mut send_buffer = self.send_buffer.inner.lock().unwrap();
                actions.reset_on_recv_stream_err(&mut *send_buffer, stream, counts, res)
                    .map(|_| stream.key())
                match stream_valid {
                    Ok(()) =>
                        Ok(Some(stream.key())),
                    _ => {
                        let mut send_buffer = self.send_buffer.inner.lock().unwrap();
                        actions.reset_on_recv_stream_err(&mut *send_buffer, stream, counts, stream_valid)
                            .map(|()| None)
                    }
                }
            })?
        };
        // If we're successful, push the headers and stream...
        if let Some(child) = child_key {
            let mut ppp = me.store[parent_key].pending_push_promises.take();
            ppp.push(&mut me.store.resolve(child));

        // Push the stream... this requires a bit of indirection to make
        // the borrow checker happy.
        let mut ppp = me.store[parent_key].pending_push_promises.take();
        ppp.push(&mut me.store.resolve(child_key));

            let parent = &mut me.store[parent_key];

            parent.pending_push_promises = ppp;
            parent.notify_recv();
        let parent = &mut me.store.resolve(parent_key);
        parent.pending_push_promises = ppp;
        parent.notify_recv();
        };

        Ok(())
    }
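The "indirection to make the borrow checker happy" mentioned above is a two-phase pattern: the closure that borrows the store returns only the child's key (wrapped in an Option), and once that borrow ends the parent is looked up again and mutated. A reduced, self-contained sketch of the same idea; every name here is invented for illustration.

use std::collections::HashMap;

type Key = usize;

#[derive(Default)]
struct Node {
    children: Vec<Key>,
}

fn insert_child(store: &mut HashMap<Key, Node>, parent: Key, next_key: Key) {
    // Phase 1: create the child while the store is mutably borrowed, keeping
    // only its key (not a reference into the store).
    let child_key: Option<Key> = {
        store.insert(next_key, Node::default());
        Some(next_key)
    };

    // Phase 2: the first borrow has ended, so the parent can be looked up and
    // mutated without holding two references into the store at once.
    if let Some(child) = child_key {
        store.entry(parent).or_default().children.push(child);
    }
}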
@@ -972,6 +982,26 @@ impl OpaqueStreamRef {
        me.actions.recv.poll_response(&mut stream)
    }
    /// Called by a client to check for a pushed request.
    pub fn poll_pushed(
        &mut self
    ) -> Poll<Option<(Request<()>, OpaqueStreamRef)>, proto::Error> {
        let mut me = self.inner.lock().unwrap();
        let me = &mut *me;

        let res = {
            let mut stream = me.store.resolve(self.key);
            try_ready!(me.actions.recv.poll_pushed(&mut stream))
        };
        Ok(Async::Ready(res.map(|(h, key)| {
            me.store.resolve(key).ref_inc();
            let opaque_ref =
                OpaqueStreamRef {
                    inner: self.inner.clone(), key,
                };
            (h, opaque_ref)
        })))
    }

    pub fn body_is_empty(&self) -> bool {
        let mut me = self.inner.lock().unwrap();
@@ -1102,6 +1132,7 @@ fn drop_stream_ref(inner: &Mutex<Inner>, key: store::Key) {
        maybe_cancel(stream, actions, counts);

        if stream.ref_count == 0 {
            // We won't be able to reach our push promises anymore
            let mut ppp = stream.pending_push_promises.take();
            while let Some(promise) = ppp.pop(stream.store_mut()) {
                counts.transition(promise, |counts, stream| {