Update lib to std-future

Gurwinder Singh
2019-08-15 08:25:14 +05:30
committed by Sean McArthur
parent 782f1f712c
commit c8fefd49f1
19 changed files with 1125 additions and 1038 deletions

View File

@@ -1,17 +1,18 @@
use crate::{client, frame, proto, server};
use crate::codec::RecvError;
use crate::frame::{Reason, StreamId};
use crate::{client, frame, proto, server};
use crate::frame::DEFAULT_INITIAL_WINDOW_SIZE;
use crate::proto::*;
use bytes::{Bytes, IntoBuf};
use futures::{Stream, try_ready};
use tokio_io::{AsyncRead, AsyncWrite};
use std::marker::PhantomData;
use futures::{ready, Stream};
use std::io;
use std::marker::PhantomData;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;
use tokio_io::{AsyncRead, AsyncWrite};
/// An H2 connection
#[derive(Debug)]
@@ -70,16 +71,15 @@ enum State {
impl<T, P, B> Connection<T, P, B>
where
T: AsyncRead + AsyncWrite,
T: AsyncRead + AsyncWrite + Unpin,
P: Peer,
B: IntoBuf,
B: IntoBuf + Unpin,
B::Buf: Unpin,
{
pub fn new(
codec: Codec<T, Prioritized<B::Buf>>,
config: Config,
) -> Connection<T, P, B> {
pub fn new(codec: Codec<T, Prioritized<B::Buf>>, config: Config) -> Connection<T, P, B> {
let streams = Streams::new(streams::Config {
local_init_window_sz: config.settings
local_init_window_sz: config
.settings
.initial_window_size()
.unwrap_or(DEFAULT_INITIAL_WINDOW_SIZE),
initial_max_send_streams: config.initial_max_send_streams,
@@ -88,7 +88,8 @@ where
local_reset_duration: config.reset_stream_duration,
local_reset_max: config.reset_stream_max,
remote_init_window_sz: DEFAULT_INITIAL_WINDOW_SIZE,
remote_max_initiated: config.settings
remote_max_initiated: config
.settings
.max_concurrent_streams()
.map(|max| max as usize),
});
@@ -112,25 +113,24 @@ where
///
/// Returns `RecvError` as this may raise errors that are caused by delayed
/// processing of received frames.
fn poll_ready(&mut self) -> Poll<(), RecvError> {
fn poll_ready(&mut self, cx: &mut Context) -> Poll<Result<(), RecvError>> {
// The order of these calls doesn't really matter too much
try_ready!(self.ping_pong.send_pending_pong(&mut self.codec));
try_ready!(self.ping_pong.send_pending_ping(&mut self.codec));
try_ready!(
self.settings
.send_pending_ack(&mut self.codec, &mut self.streams)
);
try_ready!(self.streams.send_pending_refusal(&mut self.codec));
ready!(self.ping_pong.send_pending_pong(cx, &mut self.codec))?;
ready!(self.ping_pong.send_pending_ping(cx, &mut self.codec))?;
ready!(self
.settings
.send_pending_ack(cx, &mut self.codec, &mut self.streams))?;
ready!(self.streams.send_pending_refusal(cx, &mut self.codec))?;
Ok(().into())
Poll::Ready(Ok(()))
}
/// Send any pending GOAWAY frames.
///
/// This will return `Some(reason)` if the connection should be closed
/// afterwards. If this is a graceful shutdown, this returns `None`.
fn poll_go_away(&mut self) -> Poll<Option<Reason>, io::Error> {
self.go_away.send_pending_go_away(&mut self.codec)
fn poll_go_away(&mut self, cx: &mut Context) -> Poll<Option<io::Result<Reason>>> {
self.go_away.send_pending_go_away(cx, &mut self.codec)
}
fn go_away(&mut self, id: StreamId, e: Reason) {
@@ -154,7 +154,7 @@ where
self.streams.recv_err(&proto::Error::Proto(e));
}
fn take_error(&mut self, ours: Reason) -> Poll<(), proto::Error> {
fn take_error(&mut self, ours: Reason) -> Poll<Result<(), proto::Error>> {
let reason = if let Some(theirs) = self.error.take() {
match (ours, theirs) {
// If either side reported an error, return that
@@ -171,9 +171,9 @@ where
};
if reason == Reason::NO_ERROR {
Ok(().into())
Poll::Ready(Ok(()))
} else {
Err(proto::Error::Proto(reason))
Poll::Ready(Err(proto::Error::Proto(reason)))
}
}
@@ -192,7 +192,7 @@ where
}
/// Advances the internal state of the connection.
pub fn poll(&mut self) -> Poll<(), proto::Error> {
pub fn poll(&mut self, cx: &mut Context) -> Poll<Result<(), proto::Error>> {
use crate::codec::RecvError::*;
loop {
@@ -200,15 +200,15 @@ where
match self.state {
// When open, continue to poll a frame
State::Open => {
match self.poll2() {
match self.poll2(cx) {
// The connection has shutdown normally
Ok(Async::Ready(())) => self.state = State::Closing(Reason::NO_ERROR),
Poll::Ready(Ok(())) => self.state = State::Closing(Reason::NO_ERROR),
// The connection is not ready to make progress
Ok(Async::NotReady) => {
Poll::Pending => {
// Ensure all window updates have been sent.
//
// This will also handle flushing `self.codec`
try_ready!(self.streams.poll_complete(&mut self.codec));
ready!(self.streams.poll_complete(cx, &mut self.codec))?;
if self.error.is_some() || self.go_away.should_close_on_idle() {
if !self.streams.has_streams() {
@@ -217,12 +217,12 @@ where
}
}
return Ok(Async::NotReady);
},
return Poll::Pending;
}
// Attempting to read a frame resulted in a connection level
// error. This is handled by setting a GOAWAY frame followed by
// terminating the connection.
Err(Connection(e)) => {
Poll::Ready(Err(Connection(e))) => {
log::debug!("Connection::poll; connection error={:?}", e);
// We may have already sent a GOAWAY for this error,
@@ -238,22 +238,19 @@ where
// Reset all active streams
self.streams.recv_err(&e.into());
self.go_away_now(e);
},
}
// Attempting to read a frame resulted in a stream level error.
// This is handled by resetting the frame then trying to read
// another frame.
Err(Stream {
id,
reason,
}) => {
Poll::Ready(Err(Stream { id, reason })) => {
log::trace!("stream error; id={:?}; reason={:?}", id, reason);
self.streams.send_reset(id, reason);
},
}
// Attempting to read a frame resulted in an I/O error. All
// active streams must be reset.
//
// TODO: Are I/O errors recoverable?
Err(Io(e)) => {
Poll::Ready(Err(Io(e))) => {
log::debug!("Connection::poll; IO error={:?}", e);
let e = e.into();
@@ -261,24 +258,24 @@ where
self.streams.recv_err(&e);
// Return the error
return Err(e);
},
return Poll::Ready(Err(e));
}
}
}
State::Closing(reason) => {
log::trace!("connection closing after flush");
// Flush/shutdown the codec
try_ready!(self.codec.shutdown());
ready!(self.codec.shutdown(cx))?;
// Transition the state to error
self.state = State::Closed(reason);
},
}
State::Closed(reason) => return self.take_error(reason),
}
}
}
fn poll2(&mut self) -> Poll<(), RecvError> {
fn poll2(&mut self, cx: &mut Context) -> Poll<Result<(), RecvError>> {
use crate::frame::Frame::*;
// This happens outside of the loop to prevent needing to do a clock
@@ -292,43 +289,51 @@ where
// The order here matters:
// - poll_go_away may buffer a graceful shutdown GOAWAY frame
// - If it has, we've also added a PING to be sent in poll_ready
if let Some(reason) = try_ready!(self.poll_go_away()) {
if self.go_away.should_close_now() {
if self.go_away.is_user_initiated() {
// A user-initiated abrupt shutdown shouldn't return
// the same error back to the user.
return Ok(Async::Ready(()));
} else {
return Err(RecvError::Connection(reason));
match ready!(self.poll_go_away(cx)) {
Some(Ok(reason)) => {
if self.go_away.should_close_now() {
if self.go_away.is_user_initiated() {
// A user-initiated abrupt shutdown shouldn't return
// the same error back to the user.
return Poll::Ready(Ok(()));
} else {
return Poll::Ready(Err(RecvError::Connection(reason)));
}
}
// Only NO_ERROR should be waiting for idle
debug_assert_eq!(
reason,
Reason::NO_ERROR,
"graceful GOAWAY should be NO_ERROR"
);
}
// Only NO_ERROR should be waiting for idle
debug_assert_eq!(reason, Reason::NO_ERROR, "graceful GOAWAY should be NO_ERROR");
Some(Err(e)) => return Poll::Ready(Err(e.into())),
None => (),
}
try_ready!(self.poll_ready());
ready!(self.poll_ready(cx))?;
match try_ready!(self.codec.poll()) {
Some(Headers(frame)) => {
match ready!(Pin::new(&mut self.codec).poll_next(cx)) {
Some(Ok(Headers(frame))) => {
log::trace!("recv HEADERS; frame={:?}", frame);
self.streams.recv_headers(frame)?;
},
Some(Data(frame)) => {
}
Some(Ok(Data(frame))) => {
log::trace!("recv DATA; frame={:?}", frame);
self.streams.recv_data(frame)?;
},
Some(Reset(frame)) => {
}
Some(Ok(Reset(frame))) => {
log::trace!("recv RST_STREAM; frame={:?}", frame);
self.streams.recv_reset(frame)?;
},
Some(PushPromise(frame)) => {
}
Some(Ok(PushPromise(frame))) => {
log::trace!("recv PUSH_PROMISE; frame={:?}", frame);
self.streams.recv_push_promise(frame)?;
},
Some(Settings(frame)) => {
}
Some(Ok(Settings(frame))) => {
log::trace!("recv SETTINGS; frame={:?}", frame);
self.settings.recv_settings(frame);
},
Some(GoAway(frame)) => {
}
Some(Ok(GoAway(frame))) => {
log::trace!("recv GOAWAY; frame={:?}", frame);
// This should prevent starting new streams,
// but should allow continuing to process current streams
@@ -336,8 +341,8 @@ where
// transition to GoAway.
self.streams.recv_go_away(&frame)?;
self.error = Some(frame.reason());
},
Some(Ping(frame)) => {
}
Some(Ok(Ping(frame))) => {
log::trace!("recv PING; frame={:?}", frame);
let status = self.ping_pong.recv_ping(frame);
if status.is_shutdown() {
@@ -349,21 +354,21 @@ where
let last_processed_id = self.streams.last_processed_id();
self.go_away(last_processed_id, Reason::NO_ERROR);
}
},
Some(WindowUpdate(frame)) => {
}
Some(Ok(WindowUpdate(frame))) => {
log::trace!("recv WINDOW_UPDATE; frame={:?}", frame);
self.streams.recv_window_update(frame)?;
},
Some(Priority(frame)) => {
}
Some(Ok(Priority(frame))) => {
log::trace!("recv PRIORITY; frame={:?}", frame);
// TODO: handle
},
}
Some(Err(e)) => return Poll::Ready(Err(e)),
None => {
log::trace!("codec closed");
self.streams.recv_eof(false)
.ok().expect("mutex poisoned");
return Ok(Async::Ready(()));
},
self.streams.recv_eof(false).ok().expect("mutex poisoned");
return Poll::Ready(Ok(()));
}
}
}
}
@@ -385,8 +390,9 @@ where
impl<T, B> Connection<T, server::Peer, B>
where
T: AsyncRead + AsyncWrite,
B: IntoBuf,
T: AsyncRead + AsyncWrite + Unpin,
B: IntoBuf + Unpin,
B::Buf: Unpin,
{
pub fn next_incoming(&mut self) -> Option<StreamRef<B::Buf>> {
self.streams.next_incoming()
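The whole file follows one mechanical recipe: every poll function gains a `cx: &mut Context` argument, `Poll<T, E>` becomes `Poll<Result<T, E>>`, `try_ready!` becomes `ready!(...)?`, `Ok(Async::Ready(v))`/`Ok(Async::NotReady)` become `Poll::Ready(Ok(v))`/`Poll::Pending`, and the codec is polled as a std `Stream` via `Pin::new(&mut self.codec).poll_next(cx)`, which is why the frame match arms change from `Some(Headers(frame))` to `Some(Ok(Headers(frame)))`. A minimal before/after sketch of that recipe (the `Conn`/`inner_poll` names are hypothetical, not h2's API):

use std::io;
use std::task::{Context, Poll};

struct Conn;

impl Conn {
    // Stand-in for e.g. flushing the codec.
    fn inner_poll(&mut self, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        Poll::Ready(Ok(()))
    }

    // futures 0.1:
    //   fn poll_ready(&mut self) -> Poll<(), io::Error> {
    //       try_ready!(self.inner_poll());
    //       Ok(Async::Ready(()))
    //   }
    //
    // std::future equivalent:
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        // `ready!` early-returns Poll::Pending; the trailing `?` propagates Err.
        futures::ready!(self.inner_poll(cx))?;
        Poll::Ready(Ok(()))
    }
}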

View File

@@ -2,8 +2,8 @@ use crate::codec::Codec;
use crate::frame::{self, Reason, StreamId};
use bytes::Buf;
use futures::{Async, Poll};
use std::io;
use std::task::{Context, Poll};
use tokio_io::AsyncWrite;
/// Manages our sending of GOAWAY frames.
@@ -59,7 +59,7 @@ impl GoAway {
assert!(
f.last_stream_id() <= going_away.last_processed_id,
"GOAWAY stream IDs shouldn't be higher; \
last_processed_id = {:?}, f.last_stream_id() = {:?}",
going_away.last_processed_id,
f.last_stream_id(),
);
@@ -76,8 +76,8 @@ impl GoAway {
self.close_now = true;
if let Some(ref going_away) = self.going_away {
// Prevent sending the same GOAWAY twice.
if going_away.last_processed_id == f.last_stream_id()
&& going_away.reason == f.reason() {
if going_away.last_processed_id == f.last_stream_id() && going_away.reason == f.reason()
{
return;
}
}
@@ -100,9 +100,7 @@ impl GoAway {
/// Return the last Reason we've sent.
pub fn going_away_reason(&self) -> Option<Reason> {
self.going_away
.as_ref()
.map(|g| g.reason)
self.going_away.as_ref().map(|g| g.reason)
}
/// Returns if the connection should close now, or wait until idle.
@@ -112,36 +110,43 @@ impl GoAway {
/// Returns if the connection should be closed when idle.
pub fn should_close_on_idle(&self) -> bool {
!self.close_now && self.going_away
.as_ref()
.map(|g| g.last_processed_id != StreamId::MAX)
.unwrap_or(false)
!self.close_now
&& self
.going_away
.as_ref()
.map(|g| g.last_processed_id != StreamId::MAX)
.unwrap_or(false)
}
/// Try to write a pending GOAWAY frame to the buffer.
///
/// If a frame is written, the `Reason` of the GOAWAY is returned.
pub fn send_pending_go_away<T, B>(&mut self, dst: &mut Codec<T, B>) -> Poll<Option<Reason>, io::Error>
pub fn send_pending_go_away<T, B>(
&mut self,
cx: &mut Context,
dst: &mut Codec<T, B>,
) -> Poll<Option<io::Result<Reason>>>
where
T: AsyncWrite,
B: Buf,
T: AsyncWrite + Unpin,
B: Buf + Unpin,
{
if let Some(frame) = self.pending.take() {
if !dst.poll_ready()?.is_ready() {
if !dst.poll_ready(cx)?.is_ready() {
self.pending = Some(frame);
return Ok(Async::NotReady);
return Poll::Pending;
}
let reason = frame.reason();
dst.buffer(frame.into())
.ok()
.expect("invalid GOAWAY frame");
dst.buffer(frame.into()).ok().expect("invalid GOAWAY frame");
return Ok(Async::Ready(Some(reason)));
return Poll::Ready(Some(Ok(reason)));
} else if self.should_close_now() {
return Ok(Async::Ready(self.going_away_reason()));
return match self.going_away_reason() {
Some(reason) => Poll::Ready(Some(Ok(reason))),
None => Poll::Ready(None),
};
}
Ok(Async::Ready(None))
Poll::Ready(None)
}
}
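The signature change in `send_pending_go_away` moves the error inside the option: futures 0.1's `Poll<Option<Reason>, io::Error>` becomes `Poll<Option<io::Result<Reason>>>`, so callers (like `poll2` in the connection above) now distinguish four cases instead of matching `Async` plus a separate `Err` branch. A simplified sketch of the call-site shape (placeholder `u32` standing in for `Reason`):

use std::io;
use std::task::Poll;

fn on_go_away(polled: Poll<Option<io::Result<u32>>>) -> &'static str {
    match polled {
        Poll::Ready(Some(Ok(_reason))) => "GOAWAY buffered; reason returned",
        Poll::Ready(Some(Err(_e))) => "I/O error while writing",
        Poll::Ready(None) => "nothing pending to send",
        Poll::Pending => "codec not ready; frame parked in `self.pending`",
    }
}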

View File

@@ -8,10 +8,10 @@ mod streams;
pub(crate) use self::connection::{Config, Connection};
pub(crate) use self::error::Error;
pub(crate) use self::peer::{Peer, Dyn as DynPeer};
pub(crate) use self::peer::{Dyn as DynPeer, Peer};
pub(crate) use self::ping_pong::UserPings;
pub(crate) use self::streams::{StreamRef, OpaqueStreamRef, Streams};
pub(crate) use self::streams::{PollReset, Prioritized, Open};
pub(crate) use self::streams::{OpaqueStreamRef, StreamRef, Streams};
pub(crate) use self::streams::{Open, PollReset, Prioritized};
use crate::codec::Codec;
@@ -21,9 +21,6 @@ use self::settings::Settings;
use crate::frame::{self, Frame};
use futures::{task, Async, Poll};
use futures::task::Task;
use bytes::Buf;
use tokio_io::AsyncWrite;

View File

@@ -3,11 +3,11 @@ use crate::frame::Ping;
use crate::proto::{self, PingPayload};
use bytes::Buf;
use futures::{Async, Poll};
use futures::task::AtomicTask;
use futures::task::AtomicWaker;
use std::io;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::task::{Context, Poll};
use tokio_io::AsyncWrite;
/// Acknowledges ping requests from the remote.
@@ -28,9 +28,9 @@ struct UserPingsRx(Arc<UserPingsInner>);
struct UserPingsInner {
state: AtomicUsize,
/// Task to wake up the main `Connection`.
ping_task: AtomicTask,
ping_task: AtomicWaker,
/// Task to wake up `share::PingPong::poll_pong`.
pong_task: AtomicTask,
pong_task: AtomicWaker,
}
#[derive(Debug)]
@@ -77,8 +77,8 @@ impl PingPong {
let user_pings = Arc::new(UserPingsInner {
state: AtomicUsize::new(USER_STATE_EMPTY),
ping_task: AtomicTask::new(),
pong_task: AtomicTask::new(),
ping_task: AtomicWaker::new(),
pong_task: AtomicWaker::new(),
});
self.user_pings = Some(UserPingsRx(user_pings.clone()));
Some(UserPings(user_pings))
@@ -135,34 +135,42 @@ impl PingPong {
}
/// Send any pending pongs.
pub(crate) fn send_pending_pong<T, B>(&mut self, dst: &mut Codec<T, B>) -> Poll<(), io::Error>
pub(crate) fn send_pending_pong<T, B>(
&mut self,
cx: &mut Context,
dst: &mut Codec<T, B>,
) -> Poll<io::Result<()>>
where
T: AsyncWrite,
B: Buf,
T: AsyncWrite + Unpin,
B: Buf + Unpin,
{
if let Some(pong) = self.pending_pong.take() {
if !dst.poll_ready()?.is_ready() {
if !dst.poll_ready(cx)?.is_ready() {
self.pending_pong = Some(pong);
return Ok(Async::NotReady);
return Poll::Pending;
}
dst.buffer(Ping::pong(pong).into())
.expect("invalid pong frame");
}
Ok(Async::Ready(()))
Poll::Ready(Ok(()))
}
/// Send any pending pings.
pub(crate) fn send_pending_ping<T, B>(&mut self, dst: &mut Codec<T, B>) -> Poll<(), io::Error>
pub(crate) fn send_pending_ping<T, B>(
&mut self,
cx: &mut Context,
dst: &mut Codec<T, B>,
) -> Poll<io::Result<()>>
where
T: AsyncWrite,
B: Buf,
T: AsyncWrite + Unpin,
B: Buf + Unpin,
{
if let Some(ref mut ping) = self.pending_ping {
if !ping.sent {
if !dst.poll_ready()?.is_ready() {
return Ok(Async::NotReady);
if !dst.poll_ready(cx)?.is_ready() {
return Poll::Pending;
}
dst.buffer(Ping::new(ping.payload).into())
@@ -171,19 +179,22 @@ impl PingPong {
}
} else if let Some(ref users) = self.user_pings {
if users.0.state.load(Ordering::Acquire) == USER_STATE_PENDING_PING {
if !dst.poll_ready()?.is_ready() {
return Ok(Async::NotReady);
if !dst.poll_ready(cx)?.is_ready() {
return Poll::Pending;
}
dst.buffer(Ping::new(Ping::USER).into())
.expect("invalid ping frame");
users.0.state.store(USER_STATE_PENDING_PONG, Ordering::Release);
users
.0
.state
.store(USER_STATE_PENDING_PONG, Ordering::Release);
} else {
users.0.ping_task.register();
users.0.ping_task.register(cx.waker());
}
}
Ok(Async::Ready(()))
Poll::Ready(Ok(()))
}
}
@@ -201,19 +212,17 @@ impl ReceivedPing {
impl UserPings {
pub(crate) fn send_ping(&self) -> Result<(), Option<proto::Error>> {
let prev = self.0.state.compare_and_swap(
USER_STATE_EMPTY, // current
USER_STATE_PENDING_PING, // new
Ordering::AcqRel,
);
match prev {
USER_STATE_EMPTY => {
self.0.ping_task.notify();
self.0.ping_task.wake();
Ok(())
},
USER_STATE_CLOSED => {
Err(Some(broken_pipe().into()))
}
USER_STATE_CLOSED => Err(Some(broken_pipe().into())),
_ => {
// Was already pending, user error!
Err(None)
@@ -221,20 +230,20 @@ impl UserPings {
}
}
pub(crate) fn poll_pong(&self) -> Poll<(), proto::Error> {
pub(crate) fn poll_pong(&self, cx: &mut Context) -> Poll<Result<(), proto::Error>> {
// Must register before checking state, in case state were to change
// before we could register, and then the ping would just be lost.
self.0.pong_task.register();
self.0.pong_task.register(cx.waker());
let prev = self.0.state.compare_and_swap(
USER_STATE_RECEIVED_PONG, // current
USER_STATE_EMPTY, // new
Ordering::AcqRel,
);
match prev {
USER_STATE_RECEIVED_PONG => Ok(Async::Ready(())),
USER_STATE_CLOSED => Err(broken_pipe().into()),
_ => Ok(Async::NotReady),
USER_STATE_RECEIVED_PONG => Poll::Ready(Ok(())),
USER_STATE_CLOSED => Poll::Ready(Err(broken_pipe().into())),
_ => Poll::Pending,
}
}
}
@@ -244,13 +253,13 @@ impl UserPings {
impl UserPingsRx {
fn receive_pong(&self) -> bool {
let prev = self.0.state.compare_and_swap(
USER_STATE_PENDING_PONG, // current
USER_STATE_RECEIVED_PONG, // new
Ordering::AcqRel,
);
if prev == USER_STATE_PENDING_PONG {
self.0.pong_task.notify();
self.0.pong_task.wake();
true
} else {
false
@@ -261,7 +270,7 @@ impl UserPingsRx {
impl Drop for UserPingsRx {
fn drop(&mut self) {
self.0.state.store(USER_STATE_CLOSED, Ordering::Release);
self.0.pong_task.notify();
self.0.pong_task.wake();
}
}
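futures 0.1's `AtomicTask` maps directly onto `futures::task::AtomicWaker`: `register()` now takes `cx.waker()` explicitly, and `notify()` becomes `wake()`. The register-before-check ordering that `poll_pong` relies on carries over unchanged; a minimal sketch of that pattern (simplified two-state version, assuming the futures 0.3 crate):

use futures::task::AtomicWaker;
use std::sync::atomic::{AtomicBool, Ordering};
use std::task::{Context, Poll};

struct Shared {
    waker: AtomicWaker,
    done: AtomicBool,
}

impl Shared {
    // Consumer: register *before* checking state so a wake racing with
    // the check cannot be lost (same ordering as `poll_pong` above).
    fn poll_done(&self, cx: &mut Context<'_>) -> Poll<()> {
        self.waker.register(cx.waker());
        if self.done.load(Ordering::Acquire) {
            Poll::Ready(())
        } else {
            Poll::Pending
        }
    }

    // Producer: publish the state change, then wake the registered task.
    fn set_done(&self) {
        self.done.store(true, Ordering::Release);
        self.waker.wake();
    }
}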

View File

@@ -1,6 +1,7 @@
use crate::codec::RecvError;
use crate::frame;
use crate::proto::*;
use std::task::{Poll, Context};
#[derive(Debug)]
pub(crate) struct Settings {
@@ -29,21 +30,22 @@ impl Settings {
pub fn send_pending_ack<T, B, C, P>(
&mut self,
cx: &mut Context,
dst: &mut Codec<T, B>,
streams: &mut Streams<C, P>,
) -> Poll<(), RecvError>
) -> Poll<Result<(), RecvError>>
where
T: AsyncWrite,
B: Buf,
C: Buf,
T: AsyncWrite + Unpin,
B: Buf + Unpin,
C: Buf + Unpin,
P: Peer,
{
log::trace!("send_pending_ack; pending={:?}", self.pending);
if let Some(ref settings) = self.pending {
if !dst.poll_ready()?.is_ready() {
if let Some(settings) = &self.pending {
if !dst.poll_ready(cx)?.is_ready() {
log::trace!("failed to send ACK");
return Ok(Async::NotReady);
return Poll::Pending;
}
// Create an ACK settings frame
@@ -65,6 +67,6 @@ impl Settings {
self.pending = None;
Ok(().into())
Poll::Ready(Ok(()))
}
}
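`send_pending_ack` uses the same backpressure idiom as the GOAWAY, PING, and refusal senders: if the codec is not ready, keep the frame parked and return `Poll::Pending` (the codec's `poll_ready(cx)` has already registered the waker). Note also that `?` applies directly to `dst.poll_ready(cx)` because `Poll<Result<T, E>>` implements `Try`. A generic sketch of the idiom (hypothetical sender, `String` standing in for a frame):

use std::io;
use std::task::{Context, Poll};

struct Sender {
    pending: Option<String>, // stand-in for a pending SETTINGS ack
}

impl Sender {
    // Stand-in for dst.poll_ready(cx): is there buffer space in the codec?
    fn dst_poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        Poll::Ready(Ok(()))
    }

    fn send_pending(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        if let Some(frame) = self.pending.take() {
            // `?` on Poll<io::Result<()>> yields Poll<()> via the Try impl.
            if !self.dst_poll_ready(cx)?.is_ready() {
                // Not ready: park the frame again and wait to be woken.
                self.pending = Some(frame);
                return Poll::Pending;
            }
            // Ready: real code would buffer `frame` into the codec here.
        }
        Poll::Ready(Ok(()))
    }
}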

View File

@@ -7,10 +7,10 @@ use crate::codec::UserError;
use crate::codec::UserError::*;
use bytes::buf::Take;
use futures::try_ready;
use futures::ready;
use std::{cmp, fmt, mem};
use std::io;
use std::task::{Context, Poll, Waker};
/// # Warning
///
@@ -104,14 +104,14 @@ impl Prioritize {
frame: Frame<B>,
buffer: &mut Buffer<Frame<B>>,
stream: &mut store::Ptr,
task: &mut Option<Task>,
task: &mut Option<Waker>,
) {
// Queue the frame in the buffer
stream.pending_send.push_back(buffer, frame);
self.schedule_send(stream, task);
}
pub fn schedule_send(&mut self, stream: &mut store::Ptr, task: &mut Option<Task>) {
pub fn schedule_send(&mut self, stream: &mut store::Ptr, task: &mut Option<Waker>) {
// If the stream is waiting to be opened, nothing more to do.
if !stream.is_pending_open {
log::trace!("schedule_send; {:?}", stream.id);
@@ -120,7 +120,7 @@ impl Prioritize {
// Notify the connection.
if let Some(task) = task.take() {
task.notify();
task.wake();
}
}
}
@@ -136,7 +136,7 @@ impl Prioritize {
buffer: &mut Buffer<Frame<B>>,
stream: &mut store::Ptr,
counts: &mut Counts,
task: &mut Option<Task>,
task: &mut Option<Waker>,
) -> Result<(), UserError>
where
B: Buf,
@@ -483,17 +483,18 @@ impl Prioritize {
pub fn poll_complete<T, B>(
&mut self,
cx: &mut Context,
buffer: &mut Buffer<Frame<B>>,
store: &mut Store,
counts: &mut Counts,
dst: &mut Codec<T, Prioritized<B>>,
) -> Poll<(), io::Error>
) -> Poll<io::Result<()>>
where
T: AsyncWrite,
B: Buf,
T: AsyncWrite + Unpin,
B: Buf + Unpin,
{
// Ensure codec is ready
try_ready!(dst.poll_ready());
ready!(dst.poll_ready(cx))?;
// Reclaim any frame that has previously been written
self.reclaim_frame(buffer, store, dst);
@@ -517,18 +518,18 @@ impl Prioritize {
dst.buffer(frame).ok().expect("invalid frame");
// Ensure the codec is ready to try the loop again.
try_ready!(dst.poll_ready());
ready!(dst.poll_ready(cx))?;
// Because, always try to reclaim...
self.reclaim_frame(buffer, store, dst);
},
None => {
// Try to flush the codec.
try_ready!(dst.flush());
ready!(dst.flush(cx))?;
// This might release a data frame...
if !self.reclaim_frame(buffer, store, dst) {
return Ok(().into());
return Poll::Ready(Ok(()))
}
// No need to poll ready as poll_complete() does this for
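All the `task: &mut Option<Task>` parameters threaded through prioritization become `&mut Option<Waker>`, and `task.notify()` becomes `task.wake()`. The shape is a classic parked-consumer queue; a simplified sketch (single connection task assumed, not h2's actual types):

use std::task::{Context, Poll, Waker};

struct Queue {
    items: Vec<u32>,
    task: Option<Waker>, // the parked connection task, if any
}

impl Queue {
    fn schedule(&mut self, item: u32) {
        self.items.push(item);
        // Wake the connection so it re-polls and drains the queue
        // (was: task.notify()).
        if let Some(task) = self.task.take() {
            task.wake();
        }
    }

    fn poll_pop(&mut self, cx: &mut Context<'_>) -> Poll<u32> {
        match self.items.pop() {
            Some(item) => Poll::Ready(item),
            None => {
                // Nothing queued: remember our waker for `schedule`.
                self.task = Some(cx.waker().clone());
                Poll::Pending
            }
        }
    }
}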

View File

@@ -1,13 +1,15 @@
use std::task::Context;
use super::*;
use crate::{frame, proto};
use crate::codec::{RecvError, UserError};
use crate::frame::{Reason, DEFAULT_INITIAL_WINDOW_SIZE};
use http::{HeaderMap, Response, Request, Method};
use futures::try_ready;
use futures::ready;
use std::io;
use std::time::{Duration, Instant};
use std::task::{Poll, Waker};
#[derive(Debug)]
pub(super) struct Recv {
@@ -257,15 +259,17 @@ impl Recv {
/// Called by the client to get pushed response
pub fn poll_pushed(
&mut self, stream: &mut store::Ptr
) -> Poll<Option<(Request<()>, store::Key)>, proto::Error> {
&mut self,
cx: &Context,
stream: &mut store::Ptr
) -> Poll<Option<Result<(Request<()>, store::Key), proto::Error>>> {
use super::peer::PollMessage::*;
let mut ppp = stream.pending_push_promises.take();
let pushed = ppp.pop(stream.store_mut()).map(
|mut pushed| match pushed.pending_recv.pop_front(&mut self.buffer) {
Some(Event::Headers(Server(headers))) =>
Async::Ready(Some((headers, pushed.key()))),
(headers, pushed.key()),
// When frames are pushed into the queue, it is verified that
// the first frame is a HEADERS frame.
_ => panic!("Headers not set on pushed stream")
@@ -273,15 +277,15 @@ impl Recv {
);
stream.pending_push_promises = ppp;
if let Some(p) = pushed {
Ok(p)
Poll::Ready(Some(Ok(p)))
} else {
let is_open = stream.state.ensure_recv_open()?;
if is_open {
stream.recv_task = Some(task::current());
Ok(Async::NotReady)
stream.recv_task = Some(cx.waker().clone());
Poll::Pending
} else {
Ok(Async::Ready(None))
Poll::Ready(None)
}
}
}
@@ -289,20 +293,21 @@ impl Recv {
/// Called by the client to get the response
pub fn poll_response(
&mut self,
cx: &Context,
stream: &mut store::Ptr,
) -> Poll<Response<()>, proto::Error> {
) -> Poll<Result<Response<()>, proto::Error>> {
use super::peer::PollMessage::*;
// If the buffer is not empty, then the first frame must be a HEADERS
// frame or the user violated the contract.
match stream.pending_recv.pop_front(&mut self.buffer) {
Some(Event::Headers(Client(response))) => Ok(response.into()),
Some(Event::Headers(Client(response))) => Poll::Ready(Ok(response.into())),
Some(_) => panic!("poll_response called after response returned"),
None => {
stream.state.ensure_recv_open()?;
stream.recv_task = Some(task::current());
Ok(Async::NotReady)
stream.recv_task = Some(cx.waker().clone());
Poll::Pending
},
}
}
@@ -339,7 +344,7 @@ impl Recv {
pub fn release_connection_capacity(
&mut self,
capacity: WindowSize,
task: &mut Option<Task>,
task: &mut Option<Waker>,
) {
log::trace!(
"release_connection_capacity; size={}, connection in_flight_data={}",
@@ -355,7 +360,7 @@ impl Recv {
if self.flow.unclaimed_capacity().is_some() {
if let Some(task) = task.take() {
task.notify();
task.wake();
}
}
}
@@ -365,7 +370,7 @@ impl Recv {
&mut self,
capacity: WindowSize,
stream: &mut store::Ptr,
task: &mut Option<Task>,
task: &mut Option<Waker>,
) -> Result<(), UserError> {
log::trace!("release_capacity; size={}", capacity);
@@ -387,7 +392,7 @@ impl Recv {
self.pending_window_updates.push(stream);
if let Some(task) = task.take() {
task.notify();
task.wake();
}
}
@@ -398,7 +403,7 @@ impl Recv {
pub fn release_closed_capacity(
&mut self,
stream: &mut store::Ptr,
task: &mut Option<Task>,
task: &mut Option<Waker>,
) {
debug_assert_eq!(stream.ref_count, 0);
@@ -433,7 +438,7 @@ impl Recv {
///
/// The `task` is an optional parked task for the `Connection` that might
/// be blocked on needing more window capacity.
pub fn set_target_connection_window(&mut self, target: WindowSize, task: &mut Option<Task>) {
pub fn set_target_connection_window(&mut self, target: WindowSize, task: &mut Option<Waker>) {
log::trace!(
"set_target_connection_window; target={}; available={}, reserved={}",
target,
@@ -458,7 +463,7 @@ impl Recv {
// a connection WINDOW_UPDATE.
if self.flow.unclaimed_capacity().is_some() {
if let Some(task) = task.take() {
task.notify();
task.wake();
}
}
}
@@ -824,14 +829,15 @@ impl Recv {
/// Send any pending refusals.
pub fn send_pending_refusal<T, B>(
&mut self,
cx: &mut Context,
dst: &mut Codec<T, Prioritized<B>>,
) -> Poll<(), io::Error>
) -> Poll<io::Result<()>>
where
T: AsyncWrite,
B: Buf,
T: AsyncWrite + Unpin,
B: Buf + Unpin,
{
if let Some(stream_id) = self.refused {
try_ready!(dst.poll_ready());
ready!(dst.poll_ready(cx))?;
// Create the RST_STREAM frame
let frame = frame::Reset::new(stream_id, Reason::REFUSED_STREAM);
@@ -844,7 +850,7 @@ impl Recv {
self.refused = None;
Ok(Async::Ready(()))
Poll::Ready(Ok(()))
}
pub fn clear_expired_reset_streams(&mut self, store: &mut Store, counts: &mut Counts) {
@@ -894,37 +900,39 @@ impl Recv {
pub fn poll_complete<T, B>(
&mut self,
cx: &mut Context,
store: &mut Store,
counts: &mut Counts,
dst: &mut Codec<T, Prioritized<B>>,
) -> Poll<(), io::Error>
) -> Poll<io::Result<()>>
where
T: AsyncWrite,
B: Buf,
T: AsyncWrite + Unpin,
B: Buf + Unpin,
{
// Send any pending connection level window updates
try_ready!(self.send_connection_window_update(dst));
ready!(self.send_connection_window_update(cx, dst))?;
// Send any pending stream level window updates
try_ready!(self.send_stream_window_updates(store, counts, dst));
ready!(self.send_stream_window_updates(cx, store, counts, dst))?;
Ok(().into())
Poll::Ready(Ok(()))
}
/// Send connection level window update
fn send_connection_window_update<T, B>(
&mut self,
cx: &mut Context,
dst: &mut Codec<T, Prioritized<B>>,
) -> Poll<(), io::Error>
) -> Poll<io::Result<()>>
where
T: AsyncWrite,
B: Buf,
T: AsyncWrite + Unpin,
B: Buf + Unpin,
{
if let Some(incr) = self.flow.unclaimed_capacity() {
let frame = frame::WindowUpdate::new(StreamId::zero(), incr);
// Ensure the codec has capacity
try_ready!(dst.poll_ready());
ready!(dst.poll_ready(cx))?;
// Buffer the WINDOW_UPDATE frame
dst.buffer(frame.into())
@@ -938,28 +946,29 @@ impl Recv {
.expect("unexpected flow control state");
}
Ok(().into())
Poll::Ready(Ok(()))
}
/// Send stream level window update
pub fn send_stream_window_updates<T, B>(
&mut self,
cx: &mut Context,
store: &mut Store,
counts: &mut Counts,
dst: &mut Codec<T, Prioritized<B>>,
) -> Poll<(), io::Error>
) -> Poll<io::Result<()>>
where
T: AsyncWrite,
B: Buf,
T: AsyncWrite + Unpin,
B: Buf + Unpin,
{
loop {
// Ensure the codec has capacity
try_ready!(dst.poll_ready());
ready!(dst.poll_ready(cx))?;
// Get the next stream
let stream = match self.pending_window_updates.pop(store) {
Some(stream) => stream,
None => return Ok(().into()),
None => return Poll::Ready(Ok(())),
};
counts.transition(stream, |_, stream| {
@@ -1001,10 +1010,10 @@ impl Recv {
self.pending_accept.pop(store).map(|ptr| ptr.key())
}
pub fn poll_data(&mut self, stream: &mut Stream) -> Poll<Option<Bytes>, proto::Error> {
pub fn poll_data(&mut self, cx: &Context, stream: &mut Stream) -> Poll<Option<Result<Bytes, proto::Error>>> {
// TODO: Return error when the stream is reset
match stream.pending_recv.pop_front(&mut self.buffer) {
Some(Event::Data(payload)) => Ok(Some(payload).into()),
Some(Event::Data(payload)) => Poll::Ready(Some(Ok(payload))),
Some(event) => {
// Frame is trailer
stream.pending_recv.push_front(&mut self.buffer, event);
@@ -1020,36 +1029,37 @@ impl Recv {
stream.notify_recv();
// No more data frames
Ok(None.into())
Poll::Ready(None)
},
None => self.schedule_recv(stream),
None => self.schedule_recv(cx, stream),
}
}
pub fn poll_trailers(
&mut self,
cx: &Context,
stream: &mut Stream,
) -> Poll<Option<HeaderMap>, proto::Error> {
) -> Poll<Option<Result<HeaderMap, proto::Error>>> {
match stream.pending_recv.pop_front(&mut self.buffer) {
Some(Event::Trailers(trailers)) => Ok(Some(trailers).into()),
Some(Event::Trailers(trailers)) => Poll::Ready(Some(Ok(trailers))),
Some(event) => {
// Frame is not trailers.. not ready to poll trailers yet.
stream.pending_recv.push_front(&mut self.buffer, event);
Ok(Async::NotReady)
Poll::Pending
},
None => self.schedule_recv(stream),
None => self.schedule_recv(cx, stream),
}
}
fn schedule_recv<T>(&mut self, stream: &mut Stream) -> Poll<Option<T>, proto::Error> {
fn schedule_recv<T>(&mut self, cx: &Context, stream: &mut Stream) -> Poll<Option<Result<T, proto::Error>>> {
if stream.state.ensure_recv_open()? {
// Request to get notified once more frames arrive
stream.recv_task = Some(task::current());
Ok(Async::NotReady)
stream.recv_task = Some(cx.waker().clone());
Poll::Pending
} else {
// No more frames will be received
Ok(None.into())
Poll::Ready(None)
}
}
}
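The `poll_data`/`poll_trailers`/`schedule_recv` changes are the stream-like variant of the conversion: `Poll<Option<T>, E>` becomes `Poll<Option<Result<T, E>>>`, and `task::current()` is replaced by cloning the waker out of the passed-in `Context`. A condensed sketch of the three-way mapping (hypothetical `Inbox` type):

use std::collections::VecDeque;
use std::io;
use std::task::{Context, Poll, Waker};

struct Inbox {
    queue: VecDeque<Vec<u8>>,
    closed: bool,
    recv_task: Option<Waker>,
}

impl Inbox {
    // futures 0.1                          std::future
    // Ok(Some(data).into())        ->      Poll::Ready(Some(Ok(data)))
    // Ok(None.into())              ->      Poll::Ready(None)
    // task::current() + NotReady   ->      store cx.waker(), Poll::Pending
    fn poll_data(&mut self, cx: &Context<'_>) -> Poll<Option<io::Result<Vec<u8>>>> {
        if let Some(data) = self.queue.pop_front() {
            return Poll::Ready(Some(Ok(data)));
        }
        if self.closed {
            return Poll::Ready(None);
        }
        // Request to get notified once more frames arrive.
        self.recv_task = Some(cx.waker().clone());
        Poll::Pending
    }
}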

View File

@@ -1,14 +1,13 @@
use super::{
store, Buffer, Codec, Config, Counts, Frame, Prioritize, Prioritized, Store, Stream, StreamId,
StreamIdOverflow, WindowSize,
};
use crate::codec::{RecvError, UserError};
use crate::frame::{self, Reason};
use super::{
store, Buffer, Codec, Config, Counts, Frame, Prioritize,
Prioritized, Store, Stream, StreamId, StreamIdOverflow, WindowSize,
};
use bytes::Buf;
use http;
use futures::{Async, Poll};
use futures::task::Task;
use std::task::{Context, Poll, Waker};
use tokio_io::AsyncWrite;
use std::io;
@@ -60,7 +59,7 @@ impl Send {
buffer: &mut Buffer<Frame<B>>,
stream: &mut store::Ptr,
counts: &mut Counts,
task: &mut Option<Task>,
task: &mut Option<Waker>,
) -> Result<(), UserError> {
log::trace!(
"send_headers; frame={:?}; init_window={:?}",
@@ -81,7 +80,6 @@ impl Send {
if te != "trailers" {
log::debug!("illegal connection-specific headers found");
return Err(UserError::MalformedHeaders);
}
}
@@ -103,7 +101,8 @@ impl Send {
}
// Queue the frame for sending
self.prioritize.queue_frame(frame.into(), buffer, stream, task);
self.prioritize
.queue_frame(frame.into(), buffer, stream, task);
Ok(())
}
@@ -115,7 +114,7 @@ impl Send {
buffer: &mut Buffer<Frame<B>>,
stream: &mut store::Ptr,
counts: &mut Counts,
task: &mut Option<Task>,
task: &mut Option<Waker>,
) {
let is_reset = stream.state.is_reset();
let is_closed = stream.state.is_closed();
@@ -125,7 +124,7 @@ impl Send {
"send_reset(..., reason={:?}, stream={:?}, ..., \
is_reset={:?}; is_closed={:?}; pending_send.is_empty={:?}; \
state={:?} \
",
",
reason,
stream.id,
is_reset,
@@ -151,7 +150,7 @@ impl Send {
if is_closed && is_empty {
log::trace!(
" -> not sending explicit RST_STREAM ({:?} was closed \
and send queue was flushed)",
stream.id
);
return;
@@ -166,7 +165,8 @@ impl Send {
let frame = frame::Reset::new(stream.id, reason);
log::trace!("send_reset -- queueing; frame={:?}", frame);
self.prioritize.queue_frame(frame.into(), buffer, stream, task);
self.prioritize
.queue_frame(frame.into(), buffer, stream, task);
self.prioritize.reclaim_all_capacity(stream, counts);
}
@@ -175,7 +175,7 @@ impl Send {
stream: &mut store::Ptr,
reason: Reason,
counts: &mut Counts,
task: &mut Option<Task>,
task: &mut Option<Waker>,
) {
if stream.state.is_closed() {
// Stream is already closed, nothing more to do
@@ -194,11 +194,13 @@ impl Send {
buffer: &mut Buffer<Frame<B>>,
stream: &mut store::Ptr,
counts: &mut Counts,
task: &mut Option<Task>,
task: &mut Option<Waker>,
) -> Result<(), UserError>
where B: Buf,
where
B: Buf,
{
self.prioritize.send_data(frame, buffer, stream, counts, task)
self.prioritize
.send_data(frame, buffer, stream, counts, task)
}
pub fn send_trailers<B>(
@@ -207,7 +209,7 @@ impl Send {
buffer: &mut Buffer<Frame<B>>,
stream: &mut store::Ptr,
counts: &mut Counts,
task: &mut Option<Task>,
task: &mut Option<Waker>,
) -> Result<(), UserError> {
// TODO: Should this logic be moved into state.rs?
if !stream.state.is_send_streaming() {
@@ -221,7 +223,8 @@ impl Send {
stream.state.send_close();
log::trace!("send_trailers -- queuing; frame={:?}", frame);
self.prioritize.queue_frame(frame.into(), buffer, stream, task);
self.prioritize
.queue_frame(frame.into(), buffer, stream, task);
// Release any excess capacity
self.prioritize.reserve_capacity(0, stream, counts);
@@ -231,15 +234,18 @@ impl Send {
pub fn poll_complete<T, B>(
&mut self,
cx: &mut Context,
buffer: &mut Buffer<Frame<B>>,
store: &mut Store,
counts: &mut Counts,
dst: &mut Codec<T, Prioritized<B>>,
) -> Poll<(), io::Error>
where T: AsyncWrite,
B: Buf,
) -> Poll<io::Result<()>>
where
T: AsyncWrite + Unpin,
B: Buf + Unpin,
{
self.prioritize.poll_complete(buffer, store, counts, dst)
self.prioritize
.poll_complete(cx, buffer, store, counts, dst)
}
/// Request capacity to send data
@@ -247,27 +253,28 @@ impl Send {
&mut self,
capacity: WindowSize,
stream: &mut store::Ptr,
counts: &mut Counts)
{
counts: &mut Counts,
) {
self.prioritize.reserve_capacity(capacity, stream, counts)
}
pub fn poll_capacity(
&mut self,
cx: &Context,
stream: &mut store::Ptr,
) -> Poll<Option<WindowSize>, UserError> {
) -> Poll<Option<Result<WindowSize, UserError>>> {
if !stream.state.is_send_streaming() {
return Ok(Async::Ready(None));
return Poll::Ready(None);
}
if !stream.send_capacity_inc {
stream.wait_send();
return Ok(Async::NotReady);
stream.wait_send(cx);
return Poll::Pending;
}
stream.send_capacity_inc = false;
Ok(Async::Ready(Some(self.capacity(stream))))
Poll::Ready(Some(Ok(self.capacity(stream))))
}
/// Current available stream send capacity
@@ -284,15 +291,16 @@ impl Send {
pub fn poll_reset(
&self,
cx: &Context,
stream: &mut Stream,
mode: PollReset,
) -> Poll<Reason, crate::Error> {
) -> Poll<Result<Reason, crate::Error>> {
match stream.state.ensure_reason(mode)? {
Some(reason) => Ok(reason.into()),
Some(reason) => Poll::Ready(Ok(reason)),
None => {
stream.wait_send();
Ok(Async::NotReady)
},
stream.wait_send(cx);
Poll::Pending
}
}
}
@@ -312,14 +320,18 @@ impl Send {
buffer: &mut Buffer<Frame<B>>,
stream: &mut store::Ptr,
counts: &mut Counts,
task: &mut Option<Task>,
task: &mut Option<Waker>,
) -> Result<(), Reason> {
if let Err(e) = self.prioritize.recv_stream_window_update(sz, stream) {
log::debug!("recv_stream_window_update !!; err={:?}", e);
self.send_reset(
Reason::FLOW_CONTROL_ERROR.into(),
buffer, stream, counts, task);
buffer,
stream,
counts,
task,
);
return Err(e);
}
@@ -344,7 +356,7 @@ impl Send {
buffer: &mut Buffer<Frame<B>>,
store: &mut Store,
counts: &mut Counts,
task: &mut Option<Task>,
task: &mut Option<Waker>,
) -> Result<(), RecvError> {
// Applies an update to the remote endpoint's initial window size.
//
@@ -444,16 +456,14 @@ impl Send {
}
pub fn ensure_next_stream_id(&self) -> Result<StreamId, UserError> {
self.next_stream_id.map_err(|_| UserError::OverflowedStreamId)
self.next_stream_id
.map_err(|_| UserError::OverflowedStreamId)
}
pub fn may_have_created_stream(&self, id: StreamId) -> bool {
if let Ok(next_id) = self.next_stream_id {
// Peer::is_local_init should have been called beforehand
debug_assert_eq!(
id.is_server_initiated(),
next_id.is_server_initiated(),
);
debug_assert_eq!(id.is_server_initiated(), next_id.is_server_initiated(),);
id < next_id
} else {
true
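One subtlety that makes this migration mostly mechanical: `?` also works on a plain `Result` inside a function returning `Poll<Result<T, E>>` (the `FromResidual` impl converts `Err(e)` into `Poll::Ready(Err(e.into()))`), which is why lines like `stream.state.ensure_reason(mode)?` in `poll_reset` survive unchanged. A sketch under hypothetical types:

use std::task::{Context, Poll};

#[derive(Debug)]
struct MyError;

fn ensure(ok: bool) -> Result<Option<u32>, MyError> {
    if ok { Ok(Some(7)) } else { Err(MyError) }
}

fn poll_reset(ok: bool, _cx: &Context<'_>) -> Poll<Result<u32, MyError>> {
    // `?` on the Result short-circuits to Poll::Ready(Err(..)).
    match ensure(ok)? {
        Some(reason) => Poll::Ready(Ok(reason)),
        None => Poll::Pending, // real code registers a waker first
    }
}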

View File

@@ -2,6 +2,7 @@ use super::*;
use std::time::Instant;
use std::usize;
use std::task::{Context, Waker};
/// Tracks Stream related state
///
@@ -47,7 +48,7 @@ pub(super) struct Stream {
pub buffered_send_data: WindowSize,
/// Task tracking additional send capacity (i.e. window updates).
send_task: Option<task::Task>,
send_task: Option<Waker>,
/// Frames pending for this stream being sent to the socket
pub pending_send: buffer::Deque,
@@ -96,7 +97,7 @@ pub(super) struct Stream {
pub pending_recv: buffer::Deque,
/// Task tracking receiving frames
pub recv_task: Option<task::Task>,
pub recv_task: Option<Waker>,
/// The stream's pending push promises
pub pending_push_promises: store::Queue<NextAccept>,
@@ -280,17 +281,17 @@ impl Stream {
pub fn notify_send(&mut self) {
if let Some(task) = self.send_task.take() {
task.notify();
task.wake();
}
}
pub fn wait_send(&mut self) {
self.send_task = Some(task::current());
pub fn wait_send(&mut self, cx: &Context) {
self.send_task = Some(cx.waker().clone());
}
pub fn notify_recv(&mut self) {
if let Some(task) = self.recv_task.take() {
task.notify();
task.wake();
}
}
}
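`wait_send` gains a `cx: &Context` parameter for a fundamental reason: std::future has no implicit current task, so `task::current()` is gone and the only way to obtain a `Waker` is from the `Context` handed to a poll function, which then has to be threaded down the call stack. A restatement of the pair in isolation (trimmed to the waker fields, with a hypothetical `poll_capacity` caller showing the threading):

use std::task::{Context, Poll, Waker};

struct Slot {
    send_task: Option<Waker>,
}

impl Slot {
    // futures 0.1 could grab the task implicitly from anywhere:
    //     self.send_task = Some(task::current());
    // std::future requires the Context to be passed in:
    fn wait_send(&mut self, cx: &Context<'_>) {
        self.send_task = Some(cx.waker().clone());
    }

    fn notify_send(&mut self) {
        if let Some(task) = self.send_task.take() {
            task.wake(); // was task.notify()
        }
    }

    // ...which is why every poll fn up the stack now carries `cx`:
    fn poll_capacity(&mut self, cx: &Context<'_>, have: bool) -> Poll<u32> {
        if have {
            Poll::Ready(1)
        } else {
            self.wait_send(cx);
            Poll::Pending
        }
    }
}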

View File

@@ -1,18 +1,20 @@
use crate::{client, proto, server};
use crate::codec::{Codec, RecvError, SendError, UserError};
use crate::frame::{self, Frame, Reason};
use crate::proto::{peer, Peer, Open, WindowSize};
use super::{Buffer, Config, Counts, Prioritized, Recv, Send, Stream, StreamId};
use super::recv::RecvHeaderBlockError;
use super::store::{self, Entry, Resolve, Store};
use super::{Buffer, Config, Counts, Prioritized, Recv, Send, Stream, StreamId};
use crate::codec::{Codec, RecvError, SendError, UserError};
use crate::frame::{self, Frame, Reason};
use crate::proto::{peer, Open, Peer, WindowSize};
use crate::{client, proto, server};
use bytes::{Buf, Bytes};
use futures::{task, Async, Poll, try_ready};
use futures::ready;
use http::{HeaderMap, Request, Response};
use std::task::{Context, Poll, Waker};
use tokio_io::AsyncWrite;
use std::{fmt, io};
use crate::PollExt;
use std::sync::{Arc, Mutex};
use std::{fmt, io};
#[derive(Debug)]
pub(crate) struct Streams<B, P>
@@ -77,7 +79,7 @@ struct Actions {
send: Send,
/// Task that calls `poll_complete`.
task: Option<task::Task>,
task: Option<Waker>,
/// If the connection errors, a copy is kept for any StreamRefs.
conn_error: Option<proto::Error>,
@@ -93,7 +95,7 @@ struct SendBuffer<B> {
impl<B, P> Streams<B, P>
where
B: Buf,
B: Buf + Unpin,
P: Peer,
{
pub fn new(config: Config) -> Self {
@@ -134,7 +136,11 @@ where
// The GOAWAY process has begun. All streams with a greater ID than
// specified as part of GOAWAY should be ignored.
if id > me.actions.recv.max_stream_id() {
log::trace!("id ({:?}) > max_stream_id ({:?}), ignoring HEADERS", id, me.actions.recv.max_stream_id());
log::trace!(
"id ({:?}) > max_stream_id ({:?}), ignoring HEADERS",
id,
me.actions.recv.max_stream_id()
);
return Ok(());
}
@@ -170,10 +176,10 @@ where
);
e.insert(stream)
},
}
None => return Ok(()),
}
},
}
};
let stream = me.store.resolve(key);
@@ -254,15 +260,16 @@ where
// The GOAWAY process has begun. All streams with a greater ID
// than specified as part of GOAWAY should be ignored.
if id > me.actions.recv.max_stream_id() {
log::trace!("id ({:?}) > max_stream_id ({:?}), ignoring DATA", id, me.actions.recv.max_stream_id());
log::trace!(
"id ({:?}) > max_stream_id ({:?}), ignoring DATA",
id,
me.actions.recv.max_stream_id()
);
return Ok(());
}
if me.actions.may_have_forgotten_stream::<P>(id) {
log::debug!(
"recv_data for old stream={:?}, sending STREAM_CLOSED",
id,
);
log::debug!("recv_data for old stream={:?}, sending STREAM_CLOSED", id,);
let sz = frame.payload().len();
// This should have been enforced at the codec::FramedRead layer, so
@@ -279,7 +286,7 @@ where
proto_err!(conn: "recv_data: stream not found; id={:?}", id);
return Err(RecvError::Connection(Reason::PROTOCOL_ERROR));
},
}
};
let actions = &mut me.actions;
@@ -294,7 +301,9 @@ where
// we won't give the data to the user, and so they can't
// release the capacity. We do it automatically.
if let Err(RecvError::Stream { .. }) = res {
actions.recv.release_connection_capacity(sz as WindowSize, &mut None);
actions
.recv
.release_connection_capacity(sz as WindowSize, &mut None);
}
actions.reset_on_recv_stream_err(send_buffer, stream, counts, res)
})
@@ -314,7 +323,11 @@ where
// The GOAWAY process has begun. All streams with a greater ID than
// specified as part of GOAWAY should be ignored.
if id > me.actions.recv.max_stream_id() {
log::trace!("id ({:?}) > max_stream_id ({:?}), ignoring RST_STREAM", id, me.actions.recv.max_stream_id());
log::trace!(
"id ({:?}) > max_stream_id ({:?}), ignoring RST_STREAM",
id,
me.actions.recv.max_stream_id()
);
return Ok(());
}
@@ -327,7 +340,7 @@ where
.map_err(RecvError::Connection)?;
return Ok(());
},
}
};
let mut send_buffer = self.send_buffer.inner.lock().unwrap();
@@ -400,14 +413,16 @@ where
actions.recv.go_away(last_stream_id);
me.store
.for_each(|stream| if stream.id > last_stream_id {
counts.transition(stream, |counts, stream| {
actions.recv.recv_err(&err, &mut *stream);
actions.send.recv_err(send_buffer, stream, counts);
.for_each(|stream| {
if stream.id > last_stream_id {
counts.transition(stream, |counts, stream| {
actions.recv.recv_err(&err, &mut *stream);
actions.send.recv_err(send_buffer, stream, counts);
Ok::<_, ()>(())
})
} else {
Ok::<_, ()>(())
})
} else {
Ok::<_, ()>(())
}
})
.unwrap();
@@ -470,7 +485,11 @@ where
// The GOAWAY process has begun. All streams with a greater ID
// than specified as part of GOAWAY should be ignored.
if id > me.actions.recv.max_stream_id() {
log::trace!("id ({:?}) > max_stream_id ({:?}), ignoring PUSH_PROMISE", id, me.actions.recv.max_stream_id());
log::trace!(
"id ({:?}) > max_stream_id ({:?}), ignoring PUSH_PROMISE",
id,
me.actions.recv.max_stream_id()
);
return Ok(());
}
@@ -480,8 +499,8 @@ where
}
None => {
proto_err!(conn: "recv_push_promise: initiating stream is in an invalid state");
return Err(RecvError::Connection(Reason::PROTOCOL_ERROR))
},
return Err(RecvError::Connection(Reason::PROTOCOL_ERROR));
}
};
// TODO: Streams in the reserved states do not count towards the concurrency
@@ -495,7 +514,12 @@ where
//
// If `None` is returned, then the stream is being refused. There is no
// further work to be done.
if me.actions.recv.open(promised_id, Open::PushPromise, &mut me.counts)?.is_none() {
if me
.actions
.recv
.open(promised_id, Open::PushPromise, &mut me.counts)?
.is_none()
{
return Ok(());
}
@@ -507,21 +531,26 @@ where
Stream::new(
promised_id,
me.actions.send.init_window_sz(),
me.actions.recv.init_window_sz())
me.actions.recv.init_window_sz(),
)
});
let actions = &mut me.actions;
me.counts.transition(stream, |counts, stream| {
let stream_valid =
actions.recv.recv_push_promise(frame, stream);
let stream_valid = actions.recv.recv_push_promise(frame, stream);
match stream_valid {
Ok(()) =>
Ok(Some(stream.key())),
Ok(()) => Ok(Some(stream.key())),
_ => {
let mut send_buffer = self.send_buffer.inner.lock().unwrap();
actions.reset_on_recv_stream_err(&mut *send_buffer, stream, counts, stream_valid)
actions
.reset_on_recv_stream_err(
&mut *send_buffer,
stream,
counts,
stream_valid,
)
.map(|()| None)
}
}
@@ -549,7 +578,11 @@ where
me.refs += 1;
key.map(|key| {
let stream = &mut me.store.resolve(key);
log::trace!("next_incoming; id={:?}, state={:?}", stream.id, stream.state);
log::trace!(
"next_incoming; id={:?}, state={:?}",
stream.id,
stream.state
);
StreamRef {
opaque: OpaqueStreamRef::new(self.inner.clone(), stream),
send_buffer: self.send_buffer.clone(),
@@ -559,25 +592,33 @@ where
pub fn send_pending_refusal<T>(
&mut self,
cx: &mut Context,
dst: &mut Codec<T, Prioritized<B>>,
) -> Poll<(), io::Error>
) -> Poll<io::Result<()>>
where
T: AsyncWrite,
T: AsyncWrite + Unpin,
B: Unpin,
{
let mut me = self.inner.lock().unwrap();
let me = &mut *me;
me.actions.recv.send_pending_refusal(dst)
me.actions.recv.send_pending_refusal(cx, dst)
}
pub fn clear_expired_reset_streams(&mut self) {
let mut me = self.inner.lock().unwrap();
let me = &mut *me;
me.actions.recv.clear_expired_reset_streams(&mut me.store, &mut me.counts);
me.actions
.recv
.clear_expired_reset_streams(&mut me.store, &mut me.counts);
}
pub fn poll_complete<T>(&mut self, dst: &mut Codec<T, Prioritized<B>>) -> Poll<(), io::Error>
pub fn poll_complete<T>(
&mut self,
cx: &mut Context,
dst: &mut Codec<T, Prioritized<B>>,
) -> Poll<io::Result<()>>
where
T: AsyncWrite,
T: AsyncWrite + Unpin,
{
let mut me = self.inner.lock().unwrap();
let me = &mut *me;
@@ -589,20 +630,21 @@ where
//
// TODO: It would probably be better to interleave updates w/ data
// frames.
try_ready!(me.actions.recv.poll_complete(&mut me.store, &mut me.counts, dst));
ready!(me
.actions
.recv
.poll_complete(cx, &mut me.store, &mut me.counts, dst))?;
// Send any other pending frames
try_ready!(me.actions.send.poll_complete(
send_buffer,
&mut me.store,
&mut me.counts,
dst
));
ready!(me
.actions
.send
.poll_complete(cx, send_buffer, &mut me.store, &mut me.counts, dst))?;
// Nothing else to do, track the task
me.actions.task = Some(task::current());
me.actions.task = Some(cx.waker().clone());
Ok(().into())
Poll::Ready(Ok(()))
}
pub fn apply_remote_settings(&mut self, frame: &frame::Settings) -> Result<(), RecvError> {
@@ -615,7 +657,12 @@ where
me.counts.apply_remote_settings(frame);
me.actions.send.apply_remote_settings(
frame, send_buffer, &mut me.store, &mut me.counts, &mut me.actions.task)
frame,
send_buffer,
&mut me.store,
&mut me.counts,
&mut me.actions.task,
)
}
pub fn send_request(
@@ -624,8 +671,8 @@ where
end_of_stream: bool,
pending: Option<&OpaqueStreamRef>,
) -> Result<StreamRef<B>, SendError> {
use http::Method;
use super::stream::ContentLength;
use http::Method;
// TODO: There is a hazard with assigning a stream ID before the
// prioritize layer. If prioritization reorders new streams, this
@@ -671,8 +718,7 @@ where
}
// Convert the message
let headers = client::Peer::convert_send_message(
stream_id, request, end_of_stream)?;
let headers = client::Peer::convert_send_message(stream_id, request, end_of_stream)?;
let mut stream = me.store.insert(stream.id, stream);
@@ -701,10 +747,7 @@ where
me.refs += 1;
Ok(StreamRef {
opaque: OpaqueStreamRef::new(
self.inner.clone(),
&mut stream,
),
opaque: OpaqueStreamRef::new(self.inner.clone(), &mut stream),
send_buffer: self.send_buffer.clone(),
})
}
@@ -719,13 +762,14 @@ where
let stream = Stream::new(id, 0, 0);
e.insert(stream)
},
}
};
let stream = me.store.resolve(key);
let mut send_buffer = self.send_buffer.inner.lock().unwrap();
let send_buffer = &mut *send_buffer;
me.actions.send_reset(stream, reason, &mut me.counts, send_buffer);
me.actions
.send_reset(stream, reason, &mut me.counts, send_buffer);
}
pub fn send_go_away(&mut self, last_processed_id: StreamId) {
@@ -740,7 +784,11 @@ impl<B> Streams<B, client::Peer>
where
B: Buf,
{
pub fn poll_pending_open(&mut self, pending: Option<&OpaqueStreamRef>) -> Poll<(), crate::Error> {
pub fn poll_pending_open(
&mut self,
cx: &Context,
pending: Option<&OpaqueStreamRef>,
) -> Poll<Result<(), crate::Error>> {
let mut me = self.inner.lock().unwrap();
let me = &mut *me;
@@ -751,11 +799,11 @@ where
let mut stream = me.store.resolve(pending.key);
log::trace!("poll_pending_open; stream = {:?}", stream.is_pending_open);
if stream.is_pending_open {
stream.wait_send();
return Ok(Async::NotReady);
stream.wait_send(cx);
return Poll::Pending;
}
}
Ok(().into())
Poll::Ready(Ok(()))
}
}
@@ -845,7 +893,6 @@ where
}
}
// ===== impl StreamRef =====
impl<B> StreamRef<B> {
@@ -867,12 +914,9 @@ impl<B> StreamRef<B> {
frame.set_end_stream(end_stream);
// Send the data frame
actions.send.send_data(
frame,
send_buffer,
stream,
counts,
&mut actions.task)
actions
.send
.send_data(frame, send_buffer, stream, counts, &mut actions.task)
})
}
@@ -890,8 +934,9 @@ impl<B> StreamRef<B> {
let frame = frame::Headers::trailers(stream.id, trailers);
// Send the trailers frame
actions.send.send_trailers(
frame, send_buffer, stream, counts, &mut actions.task)
actions
.send
.send_trailers(frame, send_buffer, stream, counts, &mut actions.task)
})
}
@@ -903,7 +948,8 @@ impl<B> StreamRef<B> {
let mut send_buffer = self.send_buffer.inner.lock().unwrap();
let send_buffer = &mut *send_buffer;
me.actions.send_reset(stream, reason, &mut me.counts, send_buffer);
me.actions
.send_reset(stream, reason, &mut me.counts, send_buffer);
}
pub fn send_response(
@@ -922,8 +968,9 @@ impl<B> StreamRef<B> {
me.counts.transition(stream, |counts, stream| {
let frame = server::Peer::convert_send_message(stream.id, response, end_of_stream);
actions.send.send_headers(
frame, send_buffer, stream, counts, &mut actions.task)
actions
.send
.send_headers(frame, send_buffer, stream, counts, &mut actions.task)
})
}
@@ -955,7 +1002,9 @@ impl<B> StreamRef<B> {
let mut stream = me.store.resolve(self.opaque.key);
me.actions.send.reserve_capacity(capacity, &mut stream, &mut me.counts)
me.actions
.send
.reserve_capacity(capacity, &mut stream, &mut me.counts)
}
/// Returns the stream's current send capacity.
@@ -969,28 +1018,35 @@ impl<B> StreamRef<B> {
}
/// Request to be notified when the stream's capacity increases
pub fn poll_capacity(&mut self) -> Poll<Option<WindowSize>, UserError> {
pub fn poll_capacity(&mut self, cx: &Context) -> Poll<Option<Result<WindowSize, UserError>>> {
let mut me = self.opaque.inner.lock().unwrap();
let me = &mut *me;
let mut stream = me.store.resolve(self.opaque.key);
me.actions.send.poll_capacity(&mut stream)
me.actions.send.poll_capacity(cx, &mut stream)
}
/// Request to be notified for if a `RST_STREAM` is received for this stream.
pub(crate) fn poll_reset(&mut self, mode: proto::PollReset) -> Poll<Reason, crate::Error> {
pub(crate) fn poll_reset(
&mut self,
cx: &Context,
mode: proto::PollReset,
) -> Poll<Result<Reason, crate::Error>> {
let mut me = self.opaque.inner.lock().unwrap();
let me = &mut *me;
let mut stream = me.store.resolve(self.opaque.key);
me.actions.send.poll_reset(&mut stream, mode)
me.actions
.send
.poll_reset(cx, &mut stream, mode)
.map_err(From::from)
}
pub fn clone_to_opaque(&self) -> OpaqueStreamRef
where B: 'static,
where
B: 'static,
{
self.opaque.clone()
}
@@ -1015,35 +1071,37 @@ impl OpaqueStreamRef {
fn new(inner: Arc<Mutex<Inner>>, stream: &mut store::Ptr) -> OpaqueStreamRef {
stream.ref_inc();
OpaqueStreamRef {
inner, key: stream.key()
inner,
key: stream.key(),
}
}
/// Called by a client to check for a received response.
pub fn poll_response(&mut self) -> Poll<Response<()>, proto::Error> {
pub fn poll_response(&mut self, cx: &Context) -> Poll<Result<Response<()>, proto::Error>> {
let mut me = self.inner.lock().unwrap();
let me = &mut *me;
let mut stream = me.store.resolve(self.key);
me.actions.recv.poll_response(&mut stream)
me.actions.recv.poll_response(cx, &mut stream)
}
/// Called by a client to check for a pushed request.
pub fn poll_pushed(
&mut self
) -> Poll<Option<(Request<()>, OpaqueStreamRef)>, proto::Error> {
&mut self,
cx: &Context,
) -> Poll<Option<Result<(Request<()>, OpaqueStreamRef), proto::Error>>> {
let mut me = self.inner.lock().unwrap();
let me = &mut *me;
let res = {
let mut stream = me.store.resolve(self.key);
try_ready!(me.actions.recv.poll_pushed(&mut stream))
};
Ok(Async::Ready(res.map(|(h, key)| {
me.refs += 1;
let opaque_ref =
OpaqueStreamRef::new(self.inner.clone(), &mut me.store.resolve(key));
(h, opaque_ref)
})))
let mut stream = me.store.resolve(self.key);
me.actions
.recv
.poll_pushed(cx, &mut stream)
.map_ok_(|(h, key)| {
me.refs += 1;
let opaque_ref =
OpaqueStreamRef::new(self.inner.clone(), &mut me.store.resolve(key));
(h, opaque_ref)
})
}
pub fn body_is_empty(&self) -> bool {
@@ -1064,22 +1122,22 @@ impl OpaqueStreamRef {
me.actions.recv.is_end_stream(&stream)
}
pub fn poll_data(&mut self) -> Poll<Option<Bytes>, proto::Error> {
pub fn poll_data(&mut self, cx: &Context) -> Poll<Option<Result<Bytes, proto::Error>>> {
let mut me = self.inner.lock().unwrap();
let me = &mut *me;
let mut stream = me.store.resolve(self.key);
me.actions.recv.poll_data(&mut stream)
me.actions.recv.poll_data(cx, &mut stream)
}
pub fn poll_trailers(&mut self) -> Poll<Option<HeaderMap>, proto::Error> {
pub fn poll_trailers(&mut self, cx: &Context) -> Poll<Option<Result<HeaderMap, proto::Error>>> {
let mut me = self.inner.lock().unwrap();
let me = &mut *me;
let mut stream = me.store.resolve(self.key);
me.actions.recv.poll_trailers(&mut stream)
me.actions.recv.poll_trailers(cx, &mut stream)
}
/// Releases recv capacity back to the peer. This may result in sending
@@ -1101,16 +1159,11 @@ impl OpaqueStreamRef {
let mut stream = me.store.resolve(self.key);
me.actions
.recv
.clear_recv_buffer(&mut stream);
me.actions.recv.clear_recv_buffer(&mut stream);
}
pub fn stream_id(&self) -> StreamId {
self.inner.lock()
.unwrap()
.store[self.key]
.id
self.inner.lock().unwrap().store[self.key].id
}
}
@@ -1125,17 +1178,15 @@ impl fmt::Debug for OpaqueStreamRef {
.field("stream_id", &stream.id)
.field("ref_count", &stream.ref_count)
.finish()
},
Err(Poisoned(_)) => {
fmt.debug_struct("OpaqueStreamRef")
.field("inner", &"<Poisoned>")
.finish()
}
Err(WouldBlock) => {
fmt.debug_struct("OpaqueStreamRef")
.field("inner", &"<Locked>")
.finish()
}
Err(Poisoned(_)) => fmt
.debug_struct("OpaqueStreamRef")
.field("inner", &"<Poisoned>")
.finish(),
Err(WouldBlock) => fmt
.debug_struct("OpaqueStreamRef")
.field("inner", &"<Locked>")
.finish(),
}
}
}
@@ -1164,12 +1215,14 @@ impl Drop for OpaqueStreamRef {
fn drop_stream_ref(inner: &Mutex<Inner>, key: store::Key) {
let mut me = match inner.lock() {
Ok(inner) => inner,
Err(_) => if ::std::thread::panicking() {
log::trace!("StreamRef::drop; mutex poisoned");
return;
} else {
panic!("StreamRef::drop; mutex poisoned");
},
Err(_) => {
if ::std::thread::panicking() {
log::trace!("StreamRef::drop; mutex poisoned");
return;
} else {
panic!("StreamRef::drop; mutex poisoned");
}
}
};
let me = &mut *me;
@@ -1189,19 +1242,19 @@ fn drop_stream_ref(inner: &Mutex<Inner>, key: store::Key) {
// (connection) so that it can close properly
if stream.ref_count == 0 && stream.is_closed() {
if let Some(task) = actions.task.take() {
task.notify();
task.wake();
}
}
me.counts.transition(stream, |counts, stream| {
maybe_cancel(stream, actions, counts);
if stream.ref_count == 0 {
// Release any recv window back to connection, no one can access
// it anymore.
actions.recv.release_closed_capacity(stream, &mut actions.task);
actions
.recv
.release_closed_capacity(stream, &mut actions.task);
// We won't be able to reach our push promises anymore
let mut ppp = stream.pending_push_promises.take();
@@ -1216,11 +1269,9 @@ fn drop_stream_ref(inner: &Mutex<Inner>, key: store::Key) {
fn maybe_cancel(stream: &mut store::Ptr, actions: &mut Actions, counts: &mut Counts) {
if stream.is_canceled_interest() {
actions.send.schedule_implicit_reset(
stream,
Reason::CANCEL,
counts,
&mut actions.task);
actions
.send
.schedule_implicit_reset(stream, Reason::CANCEL, counts, &mut actions.task);
actions.recv.enqueue_reset_expiration(stream, counts);
}
}
@@ -1245,8 +1296,8 @@ impl Actions {
send_buffer: &mut Buffer<Frame<B>>,
) {
counts.transition(stream, |counts, stream| {
self.send.send_reset(
reason, send_buffer, stream, counts, &mut self.task);
self.send
.send_reset(reason, send_buffer, stream, counts, &mut self.task);
self.recv.enqueue_reset_expiration(stream, counts);
// if a RecvStream is parked, ensure it's notified
stream.notify_recv();
@@ -1260,12 +1311,10 @@ impl Actions {
counts: &mut Counts,
res: Result<(), RecvError>,
) -> Result<(), RecvError> {
if let Err(RecvError::Stream {
reason, ..
}) = res
{
if let Err(RecvError::Stream { reason, .. }) = res {
// Reset the stream.
self.send.send_reset(reason, buffer, stream, counts, &mut self.task);
self.send
.send_reset(reason, buffer, stream, counts, &mut self.task);
Ok(())
} else {
res
@@ -1308,11 +1357,7 @@ impl Actions {
}
}
fn clear_queues(&mut self,
clear_pending_accept: bool,
store: &mut Store,
counts: &mut Counts)
{
fn clear_queues(&mut self, clear_pending_accept: bool, store: &mut Store, counts: &mut Counts) {
self.recv.clear_queues(clear_pending_accept, store, counts);
self.send.clear_queues(store, counts);
}
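One helper worth calling out: `poll_pushed` above uses `.map_ok_(..)`, provided by a crate-internal `PollExt` extension trait (note `use crate::PollExt;` in this file's imports), since std's `Poll` offers no combinator that maps through the `Poll<Option<Result<T, E>>>` nesting. A plausible sketch of such a trait (the real h2 helper may differ in detail):

use std::task::Poll;

trait PollExt<T, E> {
    /// Map the `Ok` value of a `Poll<Option<Result<T, E>>>`.
    fn map_ok_<U, F>(self, f: F) -> Poll<Option<Result<U, E>>>
    where
        F: FnOnce(T) -> U;
}

impl<T, E> PollExt<T, E> for Poll<Option<Result<T, E>>> {
    fn map_ok_<U, F>(self, f: F) -> Poll<Option<Result<U, E>>>
    where
        F: FnOnce(T) -> U,
    {
        match self {
            Poll::Ready(Some(Ok(value))) => Poll::Ready(Some(Ok(f(value)))),
            Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))),
            Poll::Ready(None) => Poll::Ready(None),
            Poll::Pending => Poll::Pending,
        }
    }
}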