Update lib to std-future

Author: Gurwinder Singh
Date: 2019-08-15 08:25:14 +05:30
Committed by: Sean McArthur
Parent: 782f1f712c
Commit: c8fefd49f1
19 changed files with 1125 additions and 1038 deletions


@@ -15,10 +15,10 @@ addons:
matrix: matrix:
include: include:
- rust: nightly - rust: nightly
- rust: stable # - rust: stable
before_deploy: cargo doc --no-deps before_deploy: cargo doc --no-deps
allow_failures: # allow_failures:
- rust: nightly # - rust: nightly
before_script: before_script:
- cargo clean - cargo clean
@@ -39,8 +39,8 @@ script:
# Run integration tests # Run integration tests
- cargo test -p h2-tests - cargo test -p h2-tests
# Run h2spec on stable # Run h2spec on nightly for the time being. TODO: Change it to stable after Rust 1.38 release
- if [ "${TRAVIS_RUST_VERSION}" = "stable" ]; then ./ci/h2spec.sh; fi - if [ "${TRAVIS_RUST_VERSION}" = "nightly" ]; then ./ci/h2spec.sh; fi
# Check minimal versions # Check minimal versions
- if [ "${TRAVIS_RUST_VERSION}" = "nightly" ]; then cargo clean; cargo check -Z minimal-versions; fi - if [ "${TRAVIS_RUST_VERSION}" = "nightly" ]; then cargo clean; cargo check -Z minimal-versions; fi


@@ -41,8 +41,9 @@ members = [
] ]
[dependencies] [dependencies]
futures = "0.1" futures-preview = "0.3.0-alpha.18"
tokio-io = "0.1.4" tokio-io = { git = "https://github.com/tokio-rs/tokio" }
tokio-codec = { git = "https://github.com/tokio-rs/tokio" }
bytes = "0.4.7" bytes = "0.4.7"
http = "0.1.8" http = "0.1.8"
log = "0.4.1" log = "0.4.1"
@@ -64,7 +65,7 @@ serde = "1.0.0"
serde_json = "1.0.0" serde_json = "1.0.0"
# Akamai example # Akamai example
tokio = "0.1.8" tokio = { git = "https://github.com/tokio-rs/tokio" }
env_logger = { version = "0.5.3", default-features = false } env_logger = { version = "0.5.3", default-features = false }
rustls = "0.12" rustls = "0.12"
tokio-rustls = "0.5.0" tokio-rustls = "0.5.0"


@@ -64,72 +64,60 @@
//! //!
//! # Example //! # Example
//! //!
//! ```rust //! ```rust, no_run
//! #![feature(async_await)]
//!
//! use h2::client; //! use h2::client;
//! //!
//! use futures::*; //! use futures::*;
//! use http::*; //! use http::{Request, Method};
//! //! use std::error::Error;
//! use tokio::net::TcpStream; //! use tokio::net::TcpStream;
//! //!
//! pub fn main() { //! #[tokio::main]
//! pub async fn main() -> Result<(), Box<dyn Error>> {
//! let addr = "127.0.0.1:5928".parse().unwrap(); //! let addr = "127.0.0.1:5928".parse().unwrap();
//!
//! // Establish TCP connection to the server.
//! let tcp = TcpStream::connect(&addr).await?;
//! let (h2, connection) = client::handshake(tcp).await?;
//! tokio::spawn(async move {
//! connection.await.unwrap();
//! });
//! //!
//! tokio::run( //! let mut h2 = h2.ready().await?;
//! // Establish TCP connection to the server. //! // Prepare the HTTP request to send to the server.
//! TcpStream::connect(&addr) //! let request = Request::builder()
//! .map_err(|_| {
//! panic!("failed to establish TCP connection")
//! })
//! .and_then(|tcp| client::handshake(tcp))
//! .and_then(|(h2, connection)| {
//! let connection = connection
//! .map_err(|_| panic!("HTTP/2.0 connection failed"));
//!
//! // Spawn a new task to drive the connection state
//! tokio::spawn(connection);
//!
//! // Wait until the `SendRequest` handle has available
//! // capacity.
//! h2.ready()
//! })
//! .and_then(|mut h2| {
//! // Prepare the HTTP request to send to the server.
//! let request = Request::builder()
//! .method(Method::GET) //! .method(Method::GET)
//! .uri("https://www.example.com/") //! .uri("https://www.example.com/")
//! .body(()) //! .body(())
//! .unwrap(); //! .unwrap();
//! //!
//! // Send the request. The second tuple item allows the caller //! // Send the request. The second tuple item allows the caller
//! // to stream a request body. //! // to stream a request body.
//! let (response, _) = h2.send_request(request, true).unwrap(); //! let (response, _) = h2.send_request(request, true).unwrap();
//!
//! let (head, mut body) = response.await?.into_parts();
//! //!
//! response.and_then(|response| { //! println!("Received response: {:?}", head);
//! let (head, mut body) = response.into_parts();
//! //!
//! println!("Received response: {:?}", head); //! // The `release_capacity` handle allows the caller to manage
//! // flow control.
//! //
//! // Whenever data is received, the caller is responsible for
//! // releasing capacity back to the server once it has freed
//! // the data from memory.
//! let mut release_capacity = body.release_capacity().clone();
//! //!
//! // The `release_capacity` handle allows the caller to manage //! while let Some(chunk) = body.next().await {
//! // flow control. //! let chunk = chunk?;
//! // //! println!("RX: {:?}", chunk);
//! // Whenever data is received, the caller is responsible for
//! // releasing capacity back to the server once it has freed
//! // the data from memory.
//! let mut release_capacity = body.release_capacity().clone();
//! //!
//! body.for_each(move |chunk| { //! // Let the server send more data.
//! println!("RX: {:?}", chunk); //! let _ = release_capacity.release_capacity(chunk.len());
//! }
//! //!
//! // Let the server send more data. //! Ok(())
//! let _ = release_capacity.release_capacity(chunk.len());
//!
//! Ok(())
//! })
//! })
//! })
//! .map_err(|e| panic!("failed to perform HTTP/2.0 request: {:?}", e))
//! )
//! } //! }
//! ``` //! ```
//! //!
@@ -151,21 +139,23 @@
//! [`Builder`]: struct.Builder.html //! [`Builder`]: struct.Builder.html
//! [`Error`]: ../struct.Error.html //! [`Error`]: ../struct.Error.html
use crate::{SendStream, RecvStream, ReleaseCapacity, PingPong};
use crate::codec::{Codec, RecvError, SendError, UserError}; use crate::codec::{Codec, RecvError, SendError, UserError};
use crate::frame::{Headers, Pseudo, Reason, Settings, StreamId}; use crate::frame::{Headers, Pseudo, Reason, Settings, StreamId};
use crate::proto; use crate::proto;
use crate::{PingPong, RecvStream, ReleaseCapacity, SendStream};
use bytes::{Bytes, IntoBuf}; use bytes::{Bytes, IntoBuf};
use futures::{Async, Future, Poll, Stream, try_ready}; use futures::{ready, FutureExt, Stream};
use http::{uri, HeaderMap, Request, Response, Method, Version}; use http::{uri, HeaderMap, Method, Request, Response, Version};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_io::io::WriteAll;
use std::fmt; use std::fmt;
use std::future::Future;
use std::io;
use std::marker::PhantomData; use std::marker::PhantomData;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration; use std::time::Duration;
use std::usize; use std::usize;
use tokio_io::{AsyncRead, AsyncWrite, AsyncWriteExt};
/// Performs the HTTP/2.0 connection handshake. /// Performs the HTTP/2.0 connection handshake.
/// ///
@@ -182,9 +172,9 @@ use std::usize;
/// ///
/// [module]: index.html /// [module]: index.html
#[must_use = "futures do nothing unless polled"] #[must_use = "futures do nothing unless polled"]
pub struct Handshake<T, B = Bytes> { pub struct Handshake<'a, T, B = Bytes> {
builder: Builder, builder: Builder,
inner: WriteAll<T, &'static [u8]>, inner: Pin<Box<dyn Future<Output = io::Result<T>> + 'a>>,
_marker: PhantomData<fn(B)>, _marker: PhantomData<fn(B)>,
} }
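With the `WriteAll` combinator gone, `Handshake` now stores the in-flight preface write as a pinned, boxed future and polls it by delegation. A minimal sketch of that pattern with std types only, driven by `futures::executor::block_on`; the `BoxedStep` name and the trivially-ready step are illustrative, not h2 internals:

```rust
use std::future::Future;
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};

/// Owns an in-flight I/O step as a boxed, pinned future, the way the new
/// `Handshake<'a, T, B>` stores the preface write.
struct BoxedStep<'a, T> {
    inner: Pin<Box<dyn Future<Output = io::Result<T>> + 'a>>,
}

impl<'a, T> BoxedStep<'a, T> {
    fn new<F>(fut: F) -> Self
    where
        F: Future<Output = io::Result<T>> + 'a,
    {
        BoxedStep { inner: Box::pin(fut) }
    }
}

impl<'a, T> Future for BoxedStep<'a, T> {
    type Output = io::Result<T>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Delegate to the boxed future; `as_mut()` re-borrows the pin.
        self.inner.as_mut().poll(cx)
    }
}

fn main() {
    // A trivially-ready step stands in for writing the connection preface.
    let step = BoxedStep::new(async { Ok::<u32, io::Error>(42) });
    let value = futures::executor::block_on(step).expect("step failed");
    assert_eq!(value, 42);
}
```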
@@ -246,31 +236,20 @@ pub struct ReadySendRequest<B: IntoBuf> {
/// # Examples /// # Examples
/// ///
/// ``` /// ```
/// # use futures::{Future, Stream}; /// #![feature(async_await)]
/// # use futures::future::Executor;
/// # use tokio_io::*; /// # use tokio_io::*;
/// # use h2::client; /// # use h2::client;
/// # use h2::client::*; /// # use h2::client::*;
/// # /// #
/// # fn doc<T, E>(my_io: T, my_executor: E) /// # async fn doc<T>(my_io: T)
/// # where T: AsyncRead + AsyncWrite + 'static, /// # where T: AsyncRead + AsyncWrite + Send + Unpin + 'static,
/// # E: Executor<Box<Future<Item = (), Error = ()>>>,
/// # { /// # {
/// client::handshake(my_io) /// let (send_request, connection) = client::handshake(my_io).await.unwrap();
/// .and_then(|(send_request, connection)| { /// // Submit the connection handle to an executor.
/// // Submit the connection handle to an executor. /// tokio::spawn(async { connection.await.expect("connection failed"); });
/// my_executor.execute(
/// # Box::new(
/// connection.map_err(|_| panic!("connection failed"))
/// # )
/// ).unwrap();
/// ///
/// // Now, use `send_request` to initialize HTTP/2.0 streams. /// // Now, use `send_request` to initialize HTTP/2.0 streams.
/// // ... /// // ...
/// # drop(send_request);
/// # Ok(())
/// })
/// # .wait().unwrap();
/// # } /// # }
/// # /// #
/// # pub fn main() {} /// # pub fn main() {}
@@ -338,8 +317,8 @@ pub struct PushPromises {
/// # use tokio_io::*; /// # use tokio_io::*;
/// # use h2::client::*; /// # use h2::client::*;
/// # /// #
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # fn doc<'a, T: AsyncRead + AsyncWrite + Unpin + 'a>(my_io: T)
/// # -> Handshake<T> /// # -> Handshake<'a, T>
/// # { /// # {
/// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // `client_fut` is a future representing the completion of the HTTP/2.0
/// // handshake. /// // handshake.
@@ -384,23 +363,23 @@ pub(crate) struct Peer;
impl<B> SendRequest<B> impl<B> SendRequest<B>
where where
B: IntoBuf, B: IntoBuf + Unpin,
B::Buf: 'static, B::Buf: Unpin + 'static,
{ {
/// Returns `Ready` when the connection can initialize a new HTTP/2.0 /// Returns `Ready` when the connection can initialize a new HTTP/2.0
/// stream. /// stream.
/// ///
/// This function must return `Ready` before `send_request` is called. When /// This function must return `Ready` before `send_request` is called. When
/// `NotReady` is returned, the task will be notified once the readiness /// `Poll::Pending` is returned, the task will be notified once the readiness
/// state changes. /// state changes.
/// ///
/// See [module] level docs for more details. /// See [module] level docs for more details.
/// ///
/// [module]: index.html /// [module]: index.html
pub fn poll_ready(&mut self) -> Poll<(), crate::Error> { pub fn poll_ready(&mut self, cx: &mut Context) -> Poll<Result<(), crate::Error>> {
try_ready!(self.inner.poll_pending_open(self.pending.as_ref())); ready!(self.inner.poll_pending_open(cx, self.pending.as_ref()))?;
self.pending = None; self.pending = None;
Ok(().into()) Poll::Ready(Ok(()))
} }
/// Consumes `self`, returning a future that returns `self` back once it is /// Consumes `self`, returning a future that returns `self` back once it is
@@ -415,19 +394,15 @@ where
/// # Examples /// # Examples
/// ///
/// ```rust /// ```rust
/// # use futures::*; /// #![feature(async_await)]
/// # use h2::client::*; /// # use h2::client::*;
/// # use http::*; /// # use http::*;
/// # fn doc(send_request: SendRequest<&'static [u8]>) /// # async fn doc(send_request: SendRequest<&'static [u8]>)
/// # { /// # {
/// // First, wait until the `send_request` handle is ready to send a new /// // First, wait until the `send_request` handle is ready to send a new
/// // request /// // request
/// send_request.ready() /// let mut send_request = send_request.ready().await.unwrap();
/// .and_then(|mut send_request| { /// // Use `send_request` here.
/// // Use `send_request` here.
/// # Ok(())
/// })
/// # .wait().unwrap();
/// # } /// # }
/// # pub fn main() {} /// # pub fn main() {}
/// ``` /// ```
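The recurring mechanical change in this commit is futures 0.1's `Poll<T, E>` plus `try_ready!` turning into `std::task::Poll<Result<T, E>>` with an explicit `&mut Context`. A self-contained sketch of that shape; `Inner` and `SendRequestLike` are toy stand-ins, and the explicit match spells out roughly what `futures::ready!` combined with `?` expands to:

```rust
use std::task::{Context, Poll};

/// Toy stand-in for the internal stream store; names are illustrative only.
struct Inner {
    pending_opens: usize,
}

impl Inner {
    /// std-future style: the caller's `Context` is threaded through
    /// explicitly so the task can be re-woken, instead of relying on the
    /// implicit task of futures 0.1.
    fn poll_pending_open(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), &'static str>> {
        if self.pending_opens == 0 {
            Poll::Ready(Ok(()))
        } else {
            // Real code would register `_cx.waker()` before returning Pending.
            Poll::Pending
        }
    }
}

struct SendRequestLike {
    inner: Inner,
}

impl SendRequestLike {
    /// The converted shape of `poll_ready`: `try_ready!(e)` becomes the
    /// explicit match below, and `Ok(().into())` becomes `Poll::Ready(Ok(()))`.
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), &'static str>> {
        match self.inner.poll_pending_open(cx) {
            Poll::Ready(Ok(())) => {}
            Poll::Ready(Err(e)) => return Poll::Ready(Err(e)),
            Poll::Pending => return Poll::Pending,
        }
        Poll::Ready(Ok(()))
    }
}

fn main() {
    // A no-op waker is enough to drive a poll by hand in a test.
    let waker = futures::task::noop_waker();
    let mut cx = Context::from_waker(&waker);
    let mut sr = SendRequestLike { inner: Inner { pending_opens: 0 } };
    assert!(matches!(sr.poll_ready(&mut cx), Poll::Ready(Ok(()))));
}
```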
@@ -479,32 +454,24 @@ where
/// Sending a request with no body /// Sending a request with no body
/// ///
/// ```rust /// ```rust
/// # use futures::*; /// #![feature(async_await)]
/// # use h2::client::*; /// # use h2::client::*;
/// # use http::*; /// # use http::*;
/// # fn doc(send_request: SendRequest<&'static [u8]>) /// # async fn doc(send_request: SendRequest<&'static [u8]>)
/// # { /// # {
/// // First, wait until the `send_request` handle is ready to send a new /// // First, wait until the `send_request` handle is ready to send a new
/// // request /// // request
/// send_request.ready() /// let mut send_request = send_request.ready().await.unwrap();
/// .and_then(|mut send_request| { /// // Prepare the HTTP request to send to the server.
/// // Prepare the HTTP request to send to the server. /// let request = Request::get("https://www.example.com/")
/// let request = Request::get("https://www.example.com/") /// .body(())
/// .body(()) /// .unwrap();
/// .unwrap();
/// ///
/// // Send the request to the server. Since we are not sending a /// // Send the request to the server. Since we are not sending a
/// // body or trailers, we can drop the `SendStream` instance. /// // body or trailers, we can drop the `SendStream` instance.
/// let (response, _) = send_request /// let (response, _) = send_request.send_request(request, true).unwrap();
/// .send_request(request, true).unwrap(); /// let response = response.await.unwrap();
/// /// // Process the response
/// response
/// })
/// .and_then(|response| {
/// // Process the response
/// # Ok(())
/// })
/// # .wait().unwrap();
/// # } /// # }
/// # pub fn main() {} /// # pub fn main() {}
/// ``` /// ```
@@ -512,48 +479,43 @@ where
/// Sending a request with a body and trailers /// Sending a request with a body and trailers
/// ///
/// ```rust /// ```rust
/// # use futures::*; /// #![feature(async_await)]
/// # use h2::client::*; /// # use h2::client::*;
/// # use http::*; /// # use http::*;
/// # fn doc(send_request: SendRequest<&'static [u8]>) /// # async fn doc(send_request: SendRequest<&'static [u8]>)
/// # { /// # {
/// // First, wait until the `send_request` handle is ready to send a new /// // First, wait until the `send_request` handle is ready to send a new
/// // request /// // request
/// send_request.ready() /// let mut send_request = send_request.ready().await.unwrap();
/// .and_then(|mut send_request| {
/// // Prepare the HTTP request to send to the server.
/// let request = Request::get("https://www.example.com/")
/// .body(())
/// .unwrap();
/// ///
/// // Send the request to the server. If we are not sending a /// // Prepare the HTTP request to send to the server.
/// // body or trailers, we can drop the `SendStream` instance. /// let request = Request::get("https://www.example.com/")
/// let (response, mut send_stream) = send_request /// .body(())
/// .send_request(request, false).unwrap(); /// .unwrap();
/// ///
/// // At this point, one option would be to wait for send capacity. /// // Send the request to the server. If we are not sending a
/// // Doing so would allow us to not hold data in memory that /// // body or trailers, we can drop the `SendStream` instance.
/// // cannot be sent. However, this is not a requirement, so this /// let (response, mut send_stream) = send_request
/// // example will skip that step. See `SendStream` documentation /// .send_request(request, false).unwrap();
/// // for more details.
/// send_stream.send_data(b"hello", false).unwrap();
/// send_stream.send_data(b"world", false).unwrap();
/// ///
/// // Send the trailers. /// // At this point, one option would be to wait for send capacity.
/// let mut trailers = HeaderMap::new(); /// // Doing so would allow us to not hold data in memory that
/// trailers.insert( /// // cannot be sent. However, this is not a requirement, so this
/// header::HeaderName::from_bytes(b"my-trailer").unwrap(), /// // example will skip that step. See `SendStream` documentation
/// header::HeaderValue::from_bytes(b"hello").unwrap()); /// // for more details.
/// send_stream.send_data(b"hello", false).unwrap();
/// send_stream.send_data(b"world", false).unwrap();
/// ///
/// send_stream.send_trailers(trailers).unwrap(); /// // Send the trailers.
/// let mut trailers = HeaderMap::new();
/// trailers.insert(
/// header::HeaderName::from_bytes(b"my-trailer").unwrap(),
/// header::HeaderValue::from_bytes(b"hello").unwrap());
/// ///
/// response /// send_stream.send_trailers(trailers).unwrap();
/// }) ///
/// .and_then(|response| { /// let response = response.await.unwrap();
/// // Process the response /// // Process the response
/// # Ok(())
/// })
/// # .wait().unwrap();
/// # } /// # }
/// # pub fn main() {} /// # pub fn main() {}
/// ``` /// ```
@@ -634,21 +596,21 @@ where
// ===== impl ReadySendRequest ===== // ===== impl ReadySendRequest =====
impl<B> Future for ReadySendRequest<B> impl<B> Future for ReadySendRequest<B>
where B: IntoBuf, where
B::Buf: 'static, B: IntoBuf + Unpin,
B::Buf: Unpin + 'static,
{ {
type Item = SendRequest<B>; type Output = Result<SendRequest<B>, crate::Error>;
type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> { fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match self.inner { match &mut self.inner {
Some(ref mut send_request) => { Some(send_request) => {
let _ = try_ready!(send_request.poll_ready()); let _ = ready!(send_request.poll_ready(cx))?;
} }
None => panic!("called `poll` after future completed"), None => panic!("called `poll` after future completed"),
} }
Ok(self.inner.take().unwrap().into()) Poll::Ready(Ok(self.inner.take().unwrap()))
} }
} }
@@ -666,8 +628,8 @@ impl Builder {
/// # use tokio_io::*; /// # use tokio_io::*;
/// # use h2::client::*; /// # use h2::client::*;
/// # /// #
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # fn doc<'a, T: AsyncRead + AsyncWrite + Unpin + 'a>(my_io: T)
/// # -> Handshake<T> /// # -> Handshake<'a, T>
/// # { /// # {
/// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // `client_fut` is a future representing the completion of the HTTP/2.0
/// // handshake. /// // handshake.
@@ -707,8 +669,8 @@ impl Builder {
/// # use tokio_io::*; /// # use tokio_io::*;
/// # use h2::client::*; /// # use h2::client::*;
/// # /// #
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # fn doc<'a, T: AsyncRead + AsyncWrite + Unpin + 'a>(my_io: T)
/// # -> Handshake<T> /// # -> Handshake<'a, T>
/// # { /// # {
/// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // `client_fut` is a future representing the completion of the HTTP/2.0
/// // handshake. /// // handshake.
@@ -741,8 +703,8 @@ impl Builder {
/// # use tokio_io::*; /// # use tokio_io::*;
/// # use h2::client::*; /// # use h2::client::*;
/// # /// #
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # fn doc<'a, T: AsyncRead + AsyncWrite + Unpin + 'a>(my_io: T)
/// # -> Handshake<T> /// # -> Handshake<'a, T>
/// # { /// # {
/// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // `client_fut` is a future representing the completion of the HTTP/2.0
/// // handshake. /// // handshake.
@@ -774,8 +736,8 @@ impl Builder {
/// # use tokio_io::*; /// # use tokio_io::*;
/// # use h2::client::*; /// # use h2::client::*;
/// # /// #
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # fn doc<'a, T: AsyncRead + AsyncWrite + Unpin + 'a>(my_io: T)
/// # -> Handshake<T> /// # -> Handshake<'a, T>
/// # { /// # {
/// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // `client_fut` is a future representing the completion of the HTTP/2.0
/// // handshake. /// // handshake.
@@ -813,8 +775,8 @@ impl Builder {
/// # use tokio_io::*; /// # use tokio_io::*;
/// # use h2::client::*; /// # use h2::client::*;
/// # /// #
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # fn doc<'a, T: AsyncRead + AsyncWrite + Unpin + 'a>(my_io: T)
/// # -> Handshake<T> /// # -> Handshake<'a, T>
/// # { /// # {
/// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // `client_fut` is a future representing the completion of the HTTP/2.0
/// // handshake. /// // handshake.
@@ -861,8 +823,8 @@ impl Builder {
/// # use tokio_io::*; /// # use tokio_io::*;
/// # use h2::client::*; /// # use h2::client::*;
/// # /// #
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # fn doc<'a, T: AsyncRead + AsyncWrite + Unpin + 'a>(my_io: T)
/// # -> Handshake<T> /// # -> Handshake<'a, T>
/// # { /// # {
/// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // `client_fut` is a future representing the completion of the HTTP/2.0
/// // handshake. /// // handshake.
@@ -901,8 +863,8 @@ impl Builder {
/// # use tokio_io::*; /// # use tokio_io::*;
/// # use h2::client::*; /// # use h2::client::*;
/// # /// #
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # fn doc<'a, T: AsyncRead + AsyncWrite + Unpin + 'a>(my_io: T)
/// # -> Handshake<T> /// # -> Handshake<'a, T>
/// # { /// # {
/// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // `client_fut` is a future representing the completion of the HTTP/2.0
/// // handshake. /// // handshake.
@@ -945,8 +907,8 @@ impl Builder {
/// # use tokio_io::*; /// # use tokio_io::*;
/// # use h2::client::*; /// # use h2::client::*;
/// # /// #
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # fn doc<'a, T: AsyncRead + AsyncWrite + Unpin + 'a>(my_io: T)
/// # -> Handshake<T> /// # -> Handshake<'a, T>
/// # { /// # {
/// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // `client_fut` is a future representing the completion of the HTTP/2.0
/// // handshake. /// // handshake.
@@ -990,8 +952,8 @@ impl Builder {
/// # use h2::client::*; /// # use h2::client::*;
/// # use std::time::Duration; /// # use std::time::Duration;
/// # /// #
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # fn doc<'a, T: AsyncRead + AsyncWrite + Unpin + 'a>(my_io: T)
/// # -> Handshake<T> /// # -> Handshake<'a, T>
/// # { /// # {
/// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // `client_fut` is a future representing the completion of the HTTP/2.0
/// // handshake. /// // handshake.
@@ -1027,8 +989,8 @@ impl Builder {
/// # use h2::client::*; /// # use h2::client::*;
/// # use std::time::Duration; /// # use std::time::Duration;
/// # /// #
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # fn doc<'a, T: AsyncRead + AsyncWrite + Unpin + 'a>(my_io: T)
/// # -> Handshake<T> /// # -> Handshake<'a, T>
/// # { /// # {
/// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // `client_fut` is a future representing the completion of the HTTP/2.0
/// // handshake. /// // handshake.
@@ -1081,8 +1043,8 @@ impl Builder {
/// # use tokio_io::*; /// # use tokio_io::*;
/// # use h2::client::*; /// # use h2::client::*;
/// # /// #
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # fn doc<'a, T: AsyncRead + AsyncWrite + Unpin + 'a>(my_io: T)
/// # -> Handshake<T> /// # -> Handshake<'a, T>
/// # { /// # {
/// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // `client_fut` is a future representing the completion of the HTTP/2.0
/// // handshake. /// // handshake.
@@ -1101,8 +1063,8 @@ impl Builder {
/// # use tokio_io::*; /// # use tokio_io::*;
/// # use h2::client::*; /// # use h2::client::*;
/// # /// #
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # fn doc<'a, T: AsyncRead + AsyncWrite + Unpin + 'a>(my_io: T)
/// # -> Handshake<T, &'static [u8]> /// # -> Handshake<'a, T, &'static [u8]>
/// # { /// # {
/// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // `client_fut` is a future representing the completion of the HTTP/2.0
/// // handshake. /// // handshake.
@@ -1113,11 +1075,11 @@ impl Builder {
/// # /// #
/// # pub fn main() {} /// # pub fn main() {}
/// ``` /// ```
pub fn handshake<T, B>(&self, io: T) -> Handshake<T, B> pub fn handshake<'a, T, B>(&self, io: T) -> Handshake<'a, T, B>
where where
T: AsyncRead + AsyncWrite, T: AsyncRead + AsyncWrite + Unpin + 'a,
B: IntoBuf, B: IntoBuf + Unpin,
B::Buf: 'static, B::Buf: Unpin + 'static,
{ {
Connection::handshake2(io, self.clone()) Connection::handshake2(io, self.clone())
} }
@@ -1149,45 +1111,41 @@ impl Default for Builder {
/// # Examples /// # Examples
/// ///
/// ``` /// ```
/// # use futures::*; /// #![feature(async_await)]
/// # use tokio_io::*; /// # use tokio_io::*;
/// # use h2::client; /// # use h2::client;
/// # use h2::client::*; /// # use h2::client::*;
/// # /// #
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # async fn doc<'a, T: AsyncRead + AsyncWrite + Unpin + 'a>(my_io: T)
/// # { /// # {
/// client::handshake(my_io) /// let (send_request, connection) = client::handshake(my_io).await.unwrap();
/// .and_then(|(send_request, connection)| { /// // The HTTP/2.0 handshake has completed, now start polling
/// // The HTTP/2.0 handshake has completed, now start polling /// // `connection` and use `send_request` to send requests to the
/// // `connection` and use `send_request` to send requests to the /// // server.
/// // server.
/// # Ok(())
/// })
/// # .wait().unwrap();
/// # } /// # }
/// # /// #
/// # pub fn main() {} /// # pub fn main() {}
/// ``` /// ```
pub fn handshake<T>(io: T) -> Handshake<T, Bytes> pub fn handshake<'a, T>(io: T) -> Handshake<'a, T, Bytes>
where T: AsyncRead + AsyncWrite, where
T: AsyncRead + AsyncWrite + Unpin + 'a,
{ {
Builder::new().handshake(io) Builder::new().handshake(io)
} }
// ===== impl Connection ===== // ===== impl Connection =====
impl<T, B> Connection<T, B> impl<'a, T, B> Connection<T, B>
where where
T: AsyncRead + AsyncWrite, T: AsyncRead + AsyncWrite + Unpin + 'a,
B: IntoBuf, B: IntoBuf + Unpin,
B::Buf: Unpin,
{ {
fn handshake2(io: T, builder: Builder) -> Handshake<T, B> { fn handshake2(mut io: T, builder: Builder) -> Handshake<'a, T, B> {
use tokio_io::io;
log::debug!("binding client connection"); log::debug!("binding client connection");
let msg: &'static [u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; let msg: &'static [u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
let handshake = io::write_all(io, msg); let handshake = Box::pin(async move { io.write_all(msg).await.map(|_| io) });
Handshake { Handshake {
builder, builder,
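The preface write itself turns from the old `io::write_all` combinator into an async block that returns the transport on success. A small runnable sketch of the same shape, assuming the `futures` crate (its `AsyncWriteExt::write_all` and `AsyncWrite` impl for `Vec<u8>` stand in for the tokio-io equivalents used in the diff):

```rust
use futures::io::AsyncWriteExt;

const PREFACE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";

fn main() {
    futures::executor::block_on(async {
        // A Vec<u8> stands in for the socket; futures implements AsyncWrite
        // for it, which is enough to show the shape of the call.
        let mut io: Vec<u8> = Vec::new();

        // Write the preface in an async block, then hand the transport back
        // out of the future on success, the same `.map(|_| io)` trick as the
        // new `handshake2`.
        let handshake = async move { io.write_all(PREFACE).await.map(|_| io) };

        let io = handshake.await.expect("preface write failed");
        assert_eq!(&io[..], PREFACE);
    });
}
```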
@@ -1224,23 +1182,21 @@ where
/// ///
/// This may only be called once. Calling multiple times will return `None`. /// This may only be called once. Calling multiple times will return `None`.
pub fn ping_pong(&mut self) -> Option<PingPong> { pub fn ping_pong(&mut self) -> Option<PingPong> {
self.inner self.inner.take_user_pings().map(PingPong::new)
.take_user_pings()
.map(PingPong::new)
} }
} }
impl<T, B> Future for Connection<T, B> impl<T, B> Future for Connection<T, B>
where where
T: AsyncRead + AsyncWrite, T: AsyncRead + AsyncWrite + Unpin,
B: IntoBuf, B: IntoBuf + Unpin,
B::Buf: Unpin,
{ {
type Item = (); type Output = Result<(), crate::Error>;
type Error = crate::Error;
fn poll(&mut self) -> Poll<(), crate::Error> { fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.inner.maybe_close_connection_if_no_streams(); self.inner.maybe_close_connection_if_no_streams();
self.inner.poll().map_err(Into::into) self.inner.poll(cx).map_err(Into::into)
} }
} }
@@ -1258,20 +1214,16 @@ where
// ===== impl Handshake ===== // ===== impl Handshake =====
impl<T, B> Future for Handshake<T, B> impl<'a, T, B> Future for Handshake<'_, T, B>
where where
T: AsyncRead + AsyncWrite, T: AsyncRead + AsyncWrite + Unpin + 'a,
B: IntoBuf, B: IntoBuf + Unpin,
B::Buf: 'static, B::Buf: Unpin + 'static,
{ {
type Item = (SendRequest<B>, Connection<T, B>); type Output = Result<(SendRequest<B>, Connection<T, B>), crate::Error>;
type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> { fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let res = self.inner.poll() let io = ready!(self.inner.poll_unpin(cx))?;
.map_err(crate::Error::from);
let (io, _) = try_ready!(res);
log::debug!("client connection bound"); log::debug!("client connection bound");
@@ -1291,13 +1243,16 @@ where
.buffer(self.builder.settings.clone().into()) .buffer(self.builder.settings.clone().into())
.expect("invalid SETTINGS frame"); .expect("invalid SETTINGS frame");
let inner = proto::Connection::new(codec, proto::Config { let inner = proto::Connection::new(
next_stream_id: self.builder.stream_id, codec,
initial_max_send_streams: self.builder.initial_max_send_streams, proto::Config {
reset_stream_duration: self.builder.reset_stream_duration, next_stream_id: self.builder.stream_id,
reset_stream_max: self.builder.reset_stream_max, initial_max_send_streams: self.builder.initial_max_send_streams,
settings: self.builder.settings.clone(), reset_stream_duration: self.builder.reset_stream_duration,
}); reset_stream_max: self.builder.reset_stream_max,
settings: self.builder.settings.clone(),
},
);
let send_request = SendRequest { let send_request = SendRequest {
inner: inner.streams().clone(), inner: inner.streams().clone(),
pending: None, pending: None,
@@ -1308,11 +1263,11 @@ where
connection.set_target_window_size(sz); connection.set_target_window_size(sz);
} }
Ok(Async::Ready((send_request, connection))) Poll::Ready(Ok((send_request, connection)))
} }
} }
impl<T, B> fmt::Debug for Handshake<T, B> impl<T, B> fmt::Debug for Handshake<'_, T, B>
where where
T: AsyncRead + AsyncWrite, T: AsyncRead + AsyncWrite,
T: fmt::Debug, T: fmt::Debug,
@@ -1327,14 +1282,13 @@ where
// ===== impl ResponseFuture ===== // ===== impl ResponseFuture =====
impl Future for ResponseFuture { impl Future for ResponseFuture {
type Item = Response<RecvStream>; type Output = Result<Response<RecvStream>, crate::Error>;
type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> { fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let (parts, _) = try_ready!(self.inner.poll_response()).into_parts(); let (parts, _) = ready!(self.inner.poll_response(cx))?.into_parts();
let body = RecvStream::new(ReleaseCapacity::new(self.inner.clone())); let body = RecvStream::new(ReleaseCapacity::new(self.inner.clone()));
Ok(Response::from_parts(parts, body).into()) Poll::Ready(Ok(Response::from_parts(parts, body).into()))
} }
} }
@@ -1358,27 +1312,31 @@ impl ResponseFuture {
panic!("Reference to push promises stream taken!"); panic!("Reference to push promises stream taken!");
} }
self.push_promise_consumed = true; self.push_promise_consumed = true;
PushPromises { inner: self.inner.clone() } PushPromises {
inner: self.inner.clone(),
}
} }
} }
// ===== impl PushPromises ===== // ===== impl PushPromises =====
impl Stream for PushPromises { impl Stream for PushPromises {
type Item = PushPromise; type Item = Result<PushPromise, crate::Error>;
type Error = crate::Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
match try_ready!(self.inner.poll_pushed()) { match self.inner.poll_pushed(cx) {
Some((request, response)) => { Poll::Ready(Some(Ok((request, response)))) => {
let response = PushedResponseFuture { let response = PushedResponseFuture {
inner: ResponseFuture { inner: ResponseFuture {
inner: response, push_promise_consumed: false inner: response,
} push_promise_consumed: false,
},
}; };
Ok(Async::Ready(Some(PushPromise{request, response}))) Poll::Ready(Some(Ok(PushPromise { request, response })))
} }
None => Ok(Async::Ready(None)), Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e.into()))),
Poll::Ready(None) => Poll::Ready(None),
Poll::Pending => Poll::Pending,
} }
} }
} }
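Streams migrate the same way as futures: the separate `Item`/`Error` pair and `poll()` become `Item = Result<..>` and `poll_next(self: Pin<&mut Self>, cx)`, so errors arrive inline as items. A toy fallible stream with that shape, assuming the `futures` 0.3 crate; it is unrelated to the real `PushPromises` internals:

```rust
use futures::stream::{Stream, StreamExt};
use std::pin::Pin;
use std::task::{Context, Poll};

/// A toy fallible stream: pops numbers off a Vec and treats zero as an error.
struct Numbers {
    items: Vec<u32>,
}

impl Stream for Numbers {
    // Errors are now part of the item type, just as the new `PushPromises`
    // yields `Result<PushPromise, crate::Error>`.
    type Item = Result<u32, &'static str>;

    fn poll_next(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        match self.items.pop() {
            Some(0) => Poll::Ready(Some(Err("zero is not allowed"))),
            Some(n) => Poll::Ready(Some(Ok(n))),
            None => Poll::Ready(None),
        }
    }
}

fn main() {
    futures::executor::block_on(async {
        let mut s = Numbers { items: vec![3, 0, 1] };
        // `StreamExt::next` drives `poll_next`; errors arrive inline as
        // `Err` items instead of terminating the stream.
        while let Some(item) = s.next().await {
            println!("got {:?}", item);
        }
    });
}
```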
@@ -1406,11 +1364,10 @@ impl PushPromise {
// ===== impl PushedResponseFuture ===== // ===== impl PushedResponseFuture =====
impl Future for PushedResponseFuture { impl Future for PushedResponseFuture {
type Item = Response<RecvStream>; type Output = Result<Response<RecvStream>, crate::Error>;
type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> { fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.inner.poll() self.inner.poll_unpin(cx)
} }
} }
@@ -1431,8 +1388,8 @@ impl Peer {
pub fn convert_send_message( pub fn convert_send_message(
id: StreamId, id: StreamId,
request: Request<()>, request: Request<()>,
end_of_stream: bool) -> Result<Headers, SendError> end_of_stream: bool,
{ ) -> Result<Headers, SendError> {
use http::request::Parts; use http::request::Parts;
let ( let (
@@ -1503,7 +1460,9 @@ impl proto::Peer for Peer {
} }
fn convert_poll_message( fn convert_poll_message(
pseudo: Pseudo, fields: HeaderMap, stream_id: StreamId pseudo: Pseudo,
fields: HeaderMap,
stream_id: StreamId,
) -> Result<Self::Poll, RecvError> { ) -> Result<Self::Poll, RecvError> {
let mut b = Response::builder(); let mut b = Response::builder();
@@ -1522,7 +1481,7 @@ impl proto::Peer for Peer {
id: stream_id, id: stream_id,
reason: Reason::PROTOCOL_ERROR, reason: Reason::PROTOCOL_ERROR,
}); });
}, }
}; };
*response.headers_mut() = fields; *response.headers_mut() = fields;


@@ -1,24 +1,29 @@
use crate::codec::RecvError; use crate::codec::RecvError;
use crate::frame::{self, Frame, Kind, Reason}; use crate::frame::{self, Frame, Kind, Reason};
use crate::frame::{DEFAULT_MAX_FRAME_SIZE, DEFAULT_SETTINGS_HEADER_TABLE_SIZE, MAX_MAX_FRAME_SIZE}; use crate::frame::{
DEFAULT_MAX_FRAME_SIZE, DEFAULT_SETTINGS_HEADER_TABLE_SIZE, MAX_MAX_FRAME_SIZE,
};
use crate::hpack; use crate::hpack;
use futures::*; use futures::{ready, Stream};
use bytes::BytesMut; use bytes::BytesMut;
use std::io; use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio_codec::{LengthDelimitedCodec, LengthDelimitedCodecError};
use tokio_codec::FramedRead as InnerFramedRead;
use tokio_io::AsyncRead; use tokio_io::AsyncRead;
use tokio_io::codec::length_delimited;
// 16 MB "sane default" taken from golang http2 // 16 MB "sane default" taken from golang http2
const DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE: usize = 16 << 20; const DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE: usize = 16 << 20;
#[derive(Debug)] #[derive(Debug)]
pub struct FramedRead<T> { pub struct FramedRead<T> {
inner: length_delimited::FramedRead<T>, inner: InnerFramedRead<T, LengthDelimitedCodec>,
// hpack decoder state // hpack decoder state
hpack: hpack::Decoder, hpack: hpack::Decoder,
@@ -45,7 +50,7 @@ enum Continuable {
} }
impl<T> FramedRead<T> { impl<T> FramedRead<T> {
pub fn new(inner: length_delimited::FramedRead<T>) -> FramedRead<T> { pub fn new(inner: InnerFramedRead<T, LengthDelimitedCodec>) -> FramedRead<T> {
FramedRead { FramedRead {
inner: inner, inner: inner,
hpack: hpack::Decoder::new(DEFAULT_SETTINGS_HEADER_TABLE_SIZE), hpack: hpack::Decoder::new(DEFAULT_SETTINGS_HEADER_TABLE_SIZE),
@@ -138,24 +143,27 @@ impl<T> FramedRead<T> {
res.map_err(|e| { res.map_err(|e| {
proto_err!(conn: "failed to load SETTINGS frame; err={:?}", e); proto_err!(conn: "failed to load SETTINGS frame; err={:?}", e);
Connection(Reason::PROTOCOL_ERROR) Connection(Reason::PROTOCOL_ERROR)
})?.into() })?
}, .into()
}
Kind::Ping => { Kind::Ping => {
let res = frame::Ping::load(head, &bytes[frame::HEADER_LEN..]); let res = frame::Ping::load(head, &bytes[frame::HEADER_LEN..]);
res.map_err(|e| { res.map_err(|e| {
proto_err!(conn: "failed to load PING frame; err={:?}", e); proto_err!(conn: "failed to load PING frame; err={:?}", e);
Connection(Reason::PROTOCOL_ERROR) Connection(Reason::PROTOCOL_ERROR)
})?.into() })?
}, .into()
}
Kind::WindowUpdate => { Kind::WindowUpdate => {
let res = frame::WindowUpdate::load(head, &bytes[frame::HEADER_LEN..]); let res = frame::WindowUpdate::load(head, &bytes[frame::HEADER_LEN..]);
res.map_err(|e| { res.map_err(|e| {
proto_err!(conn: "failed to load WINDOW_UPDATE frame; err={:?}", e); proto_err!(conn: "failed to load WINDOW_UPDATE frame; err={:?}", e);
Connection(Reason::PROTOCOL_ERROR) Connection(Reason::PROTOCOL_ERROR)
})?.into() })?
}, .into()
}
Kind::Data => { Kind::Data => {
let _ = bytes.split_to(frame::HEADER_LEN); let _ = bytes.split_to(frame::HEADER_LEN);
let res = frame::Data::load(head, bytes.freeze()); let res = frame::Data::load(head, bytes.freeze());
@@ -164,28 +172,27 @@ impl<T> FramedRead<T> {
res.map_err(|e| { res.map_err(|e| {
proto_err!(conn: "failed to load DATA frame; err={:?}", e); proto_err!(conn: "failed to load DATA frame; err={:?}", e);
Connection(Reason::PROTOCOL_ERROR) Connection(Reason::PROTOCOL_ERROR)
})?.into() })?
}, .into()
Kind::Headers => { }
header_block!(Headers, head, bytes) Kind::Headers => header_block!(Headers, head, bytes),
},
Kind::Reset => { Kind::Reset => {
let res = frame::Reset::load(head, &bytes[frame::HEADER_LEN..]); let res = frame::Reset::load(head, &bytes[frame::HEADER_LEN..]);
res.map_err(|e| { res.map_err(|e| {
proto_err!(conn: "failed to load RESET frame; err={:?}", e); proto_err!(conn: "failed to load RESET frame; err={:?}", e);
Connection(Reason::PROTOCOL_ERROR) Connection(Reason::PROTOCOL_ERROR)
})?.into() })?
}, .into()
}
Kind::GoAway => { Kind::GoAway => {
let res = frame::GoAway::load(&bytes[frame::HEADER_LEN..]); let res = frame::GoAway::load(&bytes[frame::HEADER_LEN..]);
res.map_err(|e| { res.map_err(|e| {
proto_err!(conn: "failed to load GO_AWAY frame; err={:?}", e); proto_err!(conn: "failed to load GO_AWAY frame; err={:?}", e);
Connection(Reason::PROTOCOL_ERROR) Connection(Reason::PROTOCOL_ERROR)
})?.into() })?
}, .into()
Kind::PushPromise => { }
header_block!(PushPromise, head, bytes) Kind::PushPromise => header_block!(PushPromise, head, bytes),
},
Kind::Priority => { Kind::Priority => {
if head.stream_id() == 0 { if head.stream_id() == 0 {
// Invalid stream identifier // Invalid stream identifier
@@ -205,13 +212,13 @@ impl<T> FramedRead<T> {
id, id,
reason: Reason::PROTOCOL_ERROR, reason: Reason::PROTOCOL_ERROR,
}); });
}, }
Err(e) => { Err(e) => {
proto_err!(conn: "failed to load PRIORITY frame; err={:?};", e); proto_err!(conn: "failed to load PRIORITY frame; err={:?};", e);
return Err(Connection(Reason::PROTOCOL_ERROR)); return Err(Connection(Reason::PROTOCOL_ERROR));
} }
} }
}, }
Kind::Continuation => { Kind::Continuation => {
let is_end_headers = (head.flag() & 0x4) == 0x4; let is_end_headers = (head.flag() & 0x4) == 0x4;
@@ -229,8 +236,6 @@ impl<T> FramedRead<T> {
return Err(Connection(Reason::PROTOCOL_ERROR)); return Err(Connection(Reason::PROTOCOL_ERROR));
} }
// Extend the buf // Extend the buf
if partial.buf.is_empty() { if partial.buf.is_empty() {
partial.buf = bytes.split_off(frame::HEADER_LEN); partial.buf = bytes.split_off(frame::HEADER_LEN);
@@ -257,9 +262,14 @@ impl<T> FramedRead<T> {
partial.buf.extend_from_slice(&bytes[frame::HEADER_LEN..]); partial.buf.extend_from_slice(&bytes[frame::HEADER_LEN..]);
} }
match partial.frame.load_hpack(&mut partial.buf, self.max_header_list_size, &mut self.hpack) { match partial.frame.load_hpack(
Ok(_) => {}, &mut partial.buf,
Err(frame::Error::Hpack(hpack::DecoderError::NeedMore(_))) if !is_end_headers => {}, self.max_header_list_size,
&mut self.hpack,
) {
Ok(_) => {}
Err(frame::Error::Hpack(hpack::DecoderError::NeedMore(_)))
if !is_end_headers => {}
Err(frame::Error::MalformedMessage) => { Err(frame::Error::MalformedMessage) => {
let id = head.stream_id(); let id = head.stream_id();
proto_err!(stream: "malformed CONTINUATION frame; stream={:?}", id); proto_err!(stream: "malformed CONTINUATION frame; stream={:?}", id);
@@ -267,11 +277,11 @@ impl<T> FramedRead<T> {
id, id,
reason: Reason::PROTOCOL_ERROR, reason: Reason::PROTOCOL_ERROR,
}); });
}, }
Err(e) => { Err(e) => {
proto_err!(conn: "failed HPACK decoding; err={:?}", e); proto_err!(conn: "failed HPACK decoding; err={:?}", e);
return Err(Connection(Reason::PROTOCOL_ERROR)); return Err(Connection(Reason::PROTOCOL_ERROR));
}, }
} }
if is_end_headers { if is_end_headers {
@@ -280,11 +290,11 @@ impl<T> FramedRead<T> {
self.partial = Some(partial); self.partial = Some(partial);
return Ok(None); return Ok(None);
} }
}, }
Kind::Unknown => { Kind::Unknown => {
// Unknown frames are ignored // Unknown frames are ignored
return Ok(None); return Ok(None);
}, }
}; };
Ok(Some(frame)) Ok(Some(frame))
@@ -302,7 +312,7 @@ impl<T> FramedRead<T> {
#[cfg(feature = "unstable")] #[cfg(feature = "unstable")]
#[inline] #[inline]
pub fn max_frame_size(&self) -> usize { pub fn max_frame_size(&self) -> usize {
self.inner.max_frame_length() self.inner.decoder().max_frame_length()
} }
/// Updates the max frame size setting. /// Updates the max frame size setting.
@@ -311,7 +321,7 @@ impl<T> FramedRead<T> {
#[inline] #[inline]
pub fn set_max_frame_size(&mut self, val: usize) { pub fn set_max_frame_size(&mut self, val: usize) {
assert!(DEFAULT_MAX_FRAME_SIZE as usize <= val && val <= MAX_MAX_FRAME_SIZE as usize); assert!(DEFAULT_MAX_FRAME_SIZE as usize <= val && val <= MAX_MAX_FRAME_SIZE as usize);
self.inner.set_max_frame_length(val) self.inner.decoder_mut().set_max_frame_length(val)
} }
/// Update the max header list size setting. /// Update the max header list size setting.
@@ -323,34 +333,32 @@ impl<T> FramedRead<T> {
impl<T> Stream for FramedRead<T> impl<T> Stream for FramedRead<T>
where where
T: AsyncRead, T: AsyncRead + Unpin,
{ {
type Item = Frame; type Item = Result<Frame, RecvError>;
type Error = RecvError;
fn poll(&mut self) -> Poll<Option<Frame>, Self::Error> { fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
loop { loop {
log::trace!("poll"); log::trace!("poll");
let bytes = match try_ready!(self.inner.poll().map_err(map_err)) { let bytes = match ready!(Pin::new(&mut self.inner).poll_next(cx)) {
Some(bytes) => bytes, Some(Ok(bytes)) => bytes,
None => return Ok(Async::Ready(None)), Some(Err(e)) => return Poll::Ready(Some(Err(map_err(e)))),
None => return Poll::Ready(None),
}; };
log::trace!("poll; bytes={}B", bytes.len()); log::trace!("poll; bytes={}B", bytes.len());
if let Some(frame) = self.decode_frame(bytes)? { if let Some(frame) = self.decode_frame(bytes)? {
log::debug!("received; frame={:?}", frame); log::debug!("received; frame={:?}", frame);
return Ok(Async::Ready(Some(frame))); return Poll::Ready(Some(Ok(frame)));
} }
} }
} }
} }
fn map_err(err: io::Error) -> RecvError { fn map_err(err: io::Error) -> RecvError {
use tokio_io::codec::length_delimited::FrameTooBig;
if let io::ErrorKind::InvalidData = err.kind() { if let io::ErrorKind::InvalidData = err.kind() {
if let Some(custom) = err.get_ref() { if let Some(custom) = err.get_ref() {
if custom.is::<FrameTooBig>() { if custom.is::<LengthDelimitedCodecError>() {
return RecvError::Connection(Reason::FRAME_SIZE_ERROR); return RecvError::Connection(Reason::FRAME_SIZE_ERROR);
} }
} }


@@ -4,8 +4,10 @@ use crate::frame::{self, Frame, FrameSize};
use crate::hpack; use crate::hpack;
use bytes::{Buf, BufMut, BytesMut}; use bytes::{Buf, BufMut, BytesMut};
use futures::*; use futures::ready;
use tokio_io::{AsyncRead, AsyncWrite, try_nb}; use std::pin::Pin;
use std::task::{Context, Poll};
use tokio_io::{AsyncRead, AsyncWrite};
use std::io::{self, Cursor}; use std::io::{self, Cursor};
@@ -55,12 +57,12 @@ const CHAIN_THRESHOLD: usize = 256;
// TODO: Make generic // TODO: Make generic
impl<T, B> FramedWrite<T, B> impl<T, B> FramedWrite<T, B>
where where
T: AsyncWrite, T: AsyncWrite + Unpin,
B: Buf, B: Buf,
{ {
pub fn new(inner: T) -> FramedWrite<T, B> { pub fn new(inner: T) -> FramedWrite<T, B> {
FramedWrite { FramedWrite {
inner: inner, inner,
hpack: hpack::Encoder::default(), hpack: hpack::Encoder::default(),
buf: Cursor::new(BytesMut::with_capacity(DEFAULT_BUFFER_CAPACITY)), buf: Cursor::new(BytesMut::with_capacity(DEFAULT_BUFFER_CAPACITY)),
next: None, next: None,
@@ -73,17 +75,17 @@ where
/// ///
/// Calling this function may result in the current contents of the buffer /// Calling this function may result in the current contents of the buffer
/// to be flushed to `T`. /// to be flushed to `T`.
pub fn poll_ready(&mut self) -> Poll<(), io::Error> { pub fn poll_ready(&mut self, cx: &mut Context) -> Poll<io::Result<()>> {
if !self.has_capacity() { if !self.has_capacity() {
// Try flushing // Try flushing
self.flush()?; ready!(self.flush(cx))?;
if !self.has_capacity() { if !self.has_capacity() {
return Ok(Async::NotReady); return Poll::Pending;
} }
} }
Ok(Async::Ready(())) Poll::Ready(Ok(()))
} }
/// Buffer a frame. /// Buffer a frame.
@@ -123,33 +125,33 @@ where
// Save off the last frame... // Save off the last frame...
self.last_data_frame = Some(v); self.last_data_frame = Some(v);
} }
}, }
Frame::Headers(v) => { Frame::Headers(v) => {
if let Some(continuation) = v.encode(&mut self.hpack, self.buf.get_mut()) { if let Some(continuation) = v.encode(&mut self.hpack, self.buf.get_mut()) {
self.next = Some(Next::Continuation(continuation)); self.next = Some(Next::Continuation(continuation));
} }
}, }
Frame::PushPromise(v) => { Frame::PushPromise(v) => {
if let Some(continuation) = v.encode(&mut self.hpack, self.buf.get_mut()) { if let Some(continuation) = v.encode(&mut self.hpack, self.buf.get_mut()) {
self.next = Some(Next::Continuation(continuation)); self.next = Some(Next::Continuation(continuation));
} }
}, }
Frame::Settings(v) => { Frame::Settings(v) => {
v.encode(self.buf.get_mut()); v.encode(self.buf.get_mut());
log::trace!("encoded settings; rem={:?}", self.buf.remaining()); log::trace!("encoded settings; rem={:?}", self.buf.remaining());
}, }
Frame::GoAway(v) => { Frame::GoAway(v) => {
v.encode(self.buf.get_mut()); v.encode(self.buf.get_mut());
log::trace!("encoded go_away; rem={:?}", self.buf.remaining()); log::trace!("encoded go_away; rem={:?}", self.buf.remaining());
}, }
Frame::Ping(v) => { Frame::Ping(v) => {
v.encode(self.buf.get_mut()); v.encode(self.buf.get_mut());
log::trace!("encoded ping; rem={:?}", self.buf.remaining()); log::trace!("encoded ping; rem={:?}", self.buf.remaining());
}, }
Frame::WindowUpdate(v) => { Frame::WindowUpdate(v) => {
v.encode(self.buf.get_mut()); v.encode(self.buf.get_mut());
log::trace!("encoded window_update; rem={:?}", self.buf.remaining()); log::trace!("encoded window_update; rem={:?}", self.buf.remaining());
}, }
Frame::Priority(_) => { Frame::Priority(_) => {
/* /*
@@ -157,18 +159,18 @@ where
log::trace!("encoded priority; rem={:?}", self.buf.remaining()); log::trace!("encoded priority; rem={:?}", self.buf.remaining());
*/ */
unimplemented!(); unimplemented!();
}, }
Frame::Reset(v) => { Frame::Reset(v) => {
v.encode(self.buf.get_mut()); v.encode(self.buf.get_mut());
log::trace!("encoded reset; rem={:?}", self.buf.remaining()); log::trace!("encoded reset; rem={:?}", self.buf.remaining());
}, }
} }
Ok(()) Ok(())
} }
/// Flush buffered data to the wire /// Flush buffered data to the wire
pub fn flush(&mut self) -> Poll<(), io::Error> { pub fn flush(&mut self, cx: &mut Context) -> Poll<io::Result<()>> {
log::trace!("flush"); log::trace!("flush");
loop { loop {
@@ -177,12 +179,12 @@ where
Some(Next::Data(ref mut frame)) => { Some(Next::Data(ref mut frame)) => {
log::trace!(" -> queued data frame"); log::trace!(" -> queued data frame");
let mut buf = Buf::by_ref(&mut self.buf).chain(frame.payload_mut()); let mut buf = Buf::by_ref(&mut self.buf).chain(frame.payload_mut());
try_ready!(self.inner.write_buf(&mut buf)); ready!(Pin::new(&mut self.inner).poll_write_buf(cx, &mut buf))?;
}, }
_ => { _ => {
log::trace!(" -> not a queued data frame"); log::trace!(" -> not a queued data frame");
try_ready!(self.inner.write_buf(&mut self.buf)); ready!(Pin::new(&mut self.inner).poll_write_buf(cx, &mut self.buf))?;
}, }
} }
} }
@@ -196,11 +198,10 @@ where
self.last_data_frame = Some(frame); self.last_data_frame = Some(frame);
debug_assert!(self.is_empty()); debug_assert!(self.is_empty());
break; break;
}, }
Some(Next::Continuation(frame)) => { Some(Next::Continuation(frame)) => {
// Buffer the continuation frame, then try to write again // Buffer the continuation frame, then try to write again
if let Some(continuation) = frame.encode(&mut self.hpack, self.buf.get_mut()) { if let Some(continuation) = frame.encode(&mut self.hpack, self.buf.get_mut()) {
// We previously had a CONTINUATION, and after encoding // We previously had a CONTINUATION, and after encoding
// it, we got *another* one? Let's just double check // it, we got *another* one? Let's just double check
// that at least some progress is being made... // that at least some progress is being made...
@@ -213,7 +214,7 @@ where
self.next = Some(Next::Continuation(continuation)); self.next = Some(Next::Continuation(continuation));
} }
}, }
None => { None => {
break; break;
} }
@@ -222,15 +223,15 @@ where
log::trace!("flushing buffer"); log::trace!("flushing buffer");
// Flush the upstream // Flush the upstream
try_nb!(self.inner.flush()); ready!(Pin::new(&mut self.inner).poll_flush(cx))?;
Ok(Async::Ready(())) Poll::Ready(Ok(()))
} }
/// Close the codec /// Close the codec
pub fn shutdown(&mut self) -> Poll<(), io::Error> { pub fn shutdown(&mut self, cx: &mut Context) -> Poll<io::Result<()>> {
try_ready!(self.flush()); ready!(self.flush(cx))?;
self.inner.shutdown().map_err(Into::into) Pin::new(&mut self.inner).poll_shutdown(cx)
} }
fn has_capacity(&self) -> bool { fn has_capacity(&self) -> bool {
@@ -267,23 +268,18 @@ impl<T, B> FramedWrite<T, B> {
} }
} }
impl<T: io::Read, B> io::Read for FramedWrite<T, B> { impl<T: AsyncRead + Unpin, B: Unpin> AsyncRead for FramedWrite<T, B> {
fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> {
self.inner.read(dst)
}
}
impl<T: AsyncRead, B> AsyncRead for FramedWrite<T, B> {
fn read_buf<B2: BufMut>(&mut self, buf: &mut B2) -> Poll<usize, io::Error>
where
Self: Sized,
{
self.inner.read_buf(buf)
}
unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool {
self.inner.prepare_uninitialized_buffer(buf) self.inner.prepare_uninitialized_buffer(buf)
} }
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut self.inner).poll_read(cx, buf)
}
} }
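Where the old code forwarded blocking `io::Read` and `read_buf`, the new impl forwards a single `poll_read`, re-pinning the inner transport via `Pin::new` thanks to the `Unpin` bound. A stand-alone sketch of that delegation using `futures::io::AsyncRead`, whose `poll_read` has the same `&mut [u8]` shape as the tokio-io alpha trait in the diff; `Logged` is a made-up wrapper:

```rust
use futures::io::{AsyncRead, AsyncReadExt};
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};

/// Forwards reads to an inner transport, like the new
/// `AsyncRead for FramedWrite` impl.
struct Logged<T> {
    inner: T,
}

impl<T: AsyncRead + Unpin> AsyncRead for Logged<T> {
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut [u8],
    ) -> Poll<io::Result<usize>> {
        // `T: Unpin` lets a plain `&mut` to the inner reader be re-pinned.
        Pin::new(&mut self.inner).poll_read(cx, buf)
    }
}

fn main() {
    futures::executor::block_on(async {
        // `&[u8]` implements AsyncRead in futures, so it works as a fake socket.
        let mut r = Logged { inner: &b"hello"[..] };
        let mut buf = [0u8; 5];
        r.read_exact(&mut buf).await.expect("read failed");
        assert_eq!(&buf, b"hello");
    });
}
```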
#[cfg(feature = "unstable")] #[cfg(feature = "unstable")]


@@ -14,10 +14,11 @@ use crate::frame::{self, Data, Frame};
use futures::*; use futures::*;
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_io::codec::length_delimited;
use bytes::Buf; use bytes::Buf;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio_codec::length_delimited;
use tokio_io::{AsyncRead, AsyncWrite};
use std::io; use std::io;
@@ -28,8 +29,8 @@ pub struct Codec<T, B> {
impl<T, B> Codec<T, B> impl<T, B> Codec<T, B>
where where
T: AsyncRead + AsyncWrite, T: AsyncRead + AsyncWrite + Unpin,
B: Buf, B: Buf + Unpin,
{ {
/// Returns a new `Codec` with the default max frame size /// Returns a new `Codec` with the default max frame size
#[inline] #[inline]
@@ -55,9 +56,7 @@ where
// Use FramedRead's method since it checks the value is within range. // Use FramedRead's method since it checks the value is within range.
inner.set_max_frame_size(max_frame_size); inner.set_max_frame_size(max_frame_size);
Codec { Codec { inner }
inner,
}
} }
} }
@@ -121,12 +120,12 @@ impl<T, B> Codec<T, B> {
impl<T, B> Codec<T, B> impl<T, B> Codec<T, B>
where where
T: AsyncWrite, T: AsyncWrite + Unpin,
B: Buf, B: Buf + Unpin,
{ {
/// Returns `Ready` when the codec can buffer a frame /// Returns `Ready` when the codec can buffer a frame
pub fn poll_ready(&mut self) -> Poll<(), io::Error> { pub fn poll_ready(&mut self, cx: &mut Context) -> Poll<io::Result<()>> {
self.framed_write().poll_ready() self.framed_write().poll_ready(cx)
} }
/// Buffer a frame. /// Buffer a frame.
@@ -140,60 +139,59 @@ where
} }
/// Flush buffered data to the wire /// Flush buffered data to the wire
pub fn flush(&mut self) -> Poll<(), io::Error> { pub fn flush(&mut self, cx: &mut Context) -> Poll<io::Result<()>> {
self.framed_write().flush() self.framed_write().flush(cx)
} }
/// Shutdown the send half /// Shutdown the send half
pub fn shutdown(&mut self) -> Poll<(), io::Error> { pub fn shutdown(&mut self, cx: &mut Context) -> Poll<io::Result<()>> {
self.framed_write().shutdown() self.framed_write().shutdown(cx)
} }
} }
impl<T, B> Stream for Codec<T, B> impl<T, B> Stream for Codec<T, B>
where where
T: AsyncRead, T: AsyncRead + Unpin,
B: Unpin,
{ {
type Item = Frame; type Item = Result<Frame, RecvError>;
type Error = RecvError;
fn poll(&mut self) -> Poll<Option<Frame>, Self::Error> { fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.inner.poll() Pin::new(&mut self.inner).poll_next(cx)
} }
} }
impl<T, B> Sink for Codec<T, B> impl<T, B> Sink<Frame<B>> for Codec<T, B>
where where
T: AsyncWrite, T: AsyncWrite + Unpin,
B: Buf, B: Buf + Unpin,
{ {
type SinkItem = Frame<B>; type Error = SendError;
type SinkError = SendError;
fn start_send(&mut self, item: Self::SinkItem) -> StartSend<Self::SinkItem, Self::SinkError> { fn start_send(mut self: Pin<&mut Self>, item: Frame<B>) -> Result<(), Self::Error> {
if !self.poll_ready()?.is_ready() { Codec::buffer(&mut self, item)?;
return Ok(AsyncSink::NotReady(item)); Ok(())
} }
/// Returns `Ready` when the codec can buffer a frame
self.buffer(item)?; fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Ok(AsyncSink::Ready) self.framed_write().poll_ready(cx).map_err(Into::into)
} }
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> { /// Flush buffered data to the wire
self.flush()?; fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Ok(Async::Ready(())) self.framed_write().flush(cx).map_err(Into::into)
} }
fn close(&mut self) -> Poll<(), Self::SinkError> { fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.shutdown()?; ready!(self.shutdown(cx))?;
Ok(Async::Ready(())) Poll::Ready(Ok(()))
} }
} }
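`Sink` changes the most: the item type moves into the trait (`Sink<Frame<B>>`), `start_send` can no longer refuse an item, back-pressure moves into `poll_ready`, and `poll_flush`/`poll_close` replace `poll_complete`/`close`. A toy sink with the same method layout, assuming the `futures` 0.3 crate; `VecSink` is illustrative only:

```rust
use futures::sink::{Sink, SinkExt};
use std::pin::Pin;
use std::task::{Context, Poll};

/// Collects items into a Vec, refusing new items once "full".
struct VecSink {
    items: Vec<u32>,
    capacity: usize,
}

impl Sink<u32> for VecSink {
    type Error = &'static str;

    // Back-pressure lives here now, not in `start_send`.
    fn poll_ready(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        if self.items.len() < self.capacity {
            Poll::Ready(Ok(()))
        } else {
            Poll::Ready(Err("sink is full"))
        }
    }

    // By contract the caller has already seen `poll_ready` return `Ok`.
    fn start_send(mut self: Pin<&mut Self>, item: u32) -> Result<(), Self::Error> {
        self.items.push(item);
        Ok(())
    }

    fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // Nothing is buffered beyond the Vec itself.
        Poll::Ready(Ok(()))
    }

    fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.poll_flush(cx)
    }
}

fn main() {
    futures::executor::block_on(async {
        let mut sink = VecSink { items: Vec::new(), capacity: 4 };
        // `SinkExt::send` calls poll_ready, start_send and poll_flush for us.
        sink.send(1).await.unwrap();
        sink.send(2).await.unwrap();
        assert_eq!(sink.items, vec![1, 2]);
    });
}
```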
// TODO: remove (or improve) this // TODO: remove (or improve) this
impl<T> From<T> for Codec<T, ::std::io::Cursor<::bytes::Bytes>> impl<T> From<T> for Codec<T, ::std::io::Cursor<::bytes::Bytes>>
where where
T: AsyncRead + AsyncWrite, T: AsyncRead + AsyncWrite + Unpin,
{ {
fn from(src: T) -> Self { fn from(src: T) -> Self {
Self::new(src) Self::new(src)


@@ -81,6 +81,7 @@
#![doc(html_root_url = "https://docs.rs/h2/0.1.25")] #![doc(html_root_url = "https://docs.rs/h2/0.1.25")]
#![deny(missing_debug_implementations, missing_docs)] #![deny(missing_debug_implementations, missing_docs)]
#![cfg_attr(test, deny(warnings))] #![cfg_attr(test, deny(warnings))]
#![feature(async_await)]
macro_rules! proto_err { macro_rules! proto_err {
(conn: $($msg:tt)+) => { (conn: $($msg:tt)+) => {
@@ -91,9 +92,9 @@ macro_rules! proto_err {
}; };
} }
mod error;
#[cfg_attr(feature = "unstable", allow(missing_docs))] #[cfg_attr(feature = "unstable", allow(missing_docs))]
mod codec; mod codec;
mod error;
mod hpack; mod hpack;
mod proto; mod proto;
@@ -109,7 +110,48 @@ pub mod server;
mod share; mod share;
pub use crate::error::{Error, Reason}; pub use crate::error::{Error, Reason};
pub use crate::share::{SendStream, StreamId, RecvStream, ReleaseCapacity, PingPong, Ping, Pong}; pub use crate::share::{Ping, PingPong, Pong, RecvStream, ReleaseCapacity, SendStream, StreamId};
#[cfg(feature = "unstable")] #[cfg(feature = "unstable")]
pub use codec::{Codec, RecvError, SendError, UserError}; pub use codec::{Codec, RecvError, SendError, UserError};
use std::task::Poll;
// TODO: Get rid of this trait once https://github.com/rust-lang/rust/pull/63512
// is stabilized.
trait PollExt<T, E> {
/// Changes the success value of this `Poll` with the closure provided.
fn map_ok_<U, F>(self, f: F) -> Poll<Option<Result<U, E>>>
where
F: FnOnce(T) -> U;
/// Changes the error value of this `Poll` with the closure provided.
fn map_err_<U, F>(self, f: F) -> Poll<Option<Result<T, U>>>
where
F: FnOnce(E) -> U;
}
impl<T, E> PollExt<T, E> for Poll<Option<Result<T, E>>> {
fn map_ok_<U, F>(self, f: F) -> Poll<Option<Result<U, E>>>
where
F: FnOnce(T) -> U,
{
match self {
Poll::Ready(Some(Ok(t))) => Poll::Ready(Some(Ok(f(t)))),
Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))),
Poll::Ready(None) => Poll::Ready(None),
Poll::Pending => Poll::Pending,
}
}
fn map_err_<U, F>(self, f: F) -> Poll<Option<Result<T, U>>>
where
F: FnOnce(E) -> U,
{
match self {
Poll::Ready(Some(Ok(t))) => Poll::Ready(Some(Ok(t))),
Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(f(e)))),
Poll::Ready(None) => Poll::Ready(None),
Poll::Pending => Poll::Pending,
}
}
}
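Because `Poll::map_ok`/`map_err` over `Poll<Option<Result<T, E>>>` were not yet stable (hence the rust-lang/rust#63512 TODO above), the crate carries this small extension trait. A stand-alone copy of the idea with a usage example, showing what the helper buys at a call site:

```rust
use std::task::Poll;

/// Stand-alone copy of the helper's idea: map the `Ok` value inside
/// `Poll<Option<Result<T, E>>>` without unwrapping the layers by hand.
trait PollExt<T, E> {
    fn map_ok_<U, F: FnOnce(T) -> U>(self, f: F) -> Poll<Option<Result<U, E>>>;
}

impl<T, E> PollExt<T, E> for Poll<Option<Result<T, E>>> {
    fn map_ok_<U, F: FnOnce(T) -> U>(self, f: F) -> Poll<Option<Result<U, E>>> {
        match self {
            Poll::Ready(Some(Ok(t))) => Poll::Ready(Some(Ok(f(t)))),
            Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))),
            Poll::Ready(None) => Poll::Ready(None),
            Poll::Pending => Poll::Pending,
        }
    }
}

fn main() {
    // Typical call site: a stream-style poll result whose payload needs a
    // light transformation before being handed back to the caller.
    let polled: Poll<Option<Result<u32, ()>>> = Poll::Ready(Some(Ok(21)));
    let doubled = polled.map_ok_(|n| n * 2);
    assert_eq!(doubled, Poll::Ready(Some(Ok(42))));
}
```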


@@ -1,17 +1,18 @@
use crate::{client, frame, proto, server};
use crate::codec::RecvError; use crate::codec::RecvError;
use crate::frame::{Reason, StreamId}; use crate::frame::{Reason, StreamId};
use crate::{client, frame, proto, server};
use crate::frame::DEFAULT_INITIAL_WINDOW_SIZE; use crate::frame::DEFAULT_INITIAL_WINDOW_SIZE;
use crate::proto::*; use crate::proto::*;
use bytes::{Bytes, IntoBuf}; use bytes::{Bytes, IntoBuf};
use futures::{Stream, try_ready}; use futures::{ready, Stream};
use tokio_io::{AsyncRead, AsyncWrite};
use std::marker::PhantomData;
use std::io; use std::io;
use std::marker::PhantomData;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration; use std::time::Duration;
use tokio_io::{AsyncRead, AsyncWrite};
/// An H2 connection /// An H2 connection
#[derive(Debug)] #[derive(Debug)]
@@ -70,16 +71,15 @@ enum State {
impl<T, P, B> Connection<T, P, B> impl<T, P, B> Connection<T, P, B>
where where
T: AsyncRead + AsyncWrite, T: AsyncRead + AsyncWrite + Unpin,
P: Peer, P: Peer,
B: IntoBuf, B: IntoBuf + Unpin,
B::Buf: Unpin,
{ {
pub fn new( pub fn new(codec: Codec<T, Prioritized<B::Buf>>, config: Config) -> Connection<T, P, B> {
codec: Codec<T, Prioritized<B::Buf>>,
config: Config,
) -> Connection<T, P, B> {
let streams = Streams::new(streams::Config { let streams = Streams::new(streams::Config {
local_init_window_sz: config.settings local_init_window_sz: config
.settings
.initial_window_size() .initial_window_size()
.unwrap_or(DEFAULT_INITIAL_WINDOW_SIZE), .unwrap_or(DEFAULT_INITIAL_WINDOW_SIZE),
initial_max_send_streams: config.initial_max_send_streams, initial_max_send_streams: config.initial_max_send_streams,
@@ -88,7 +88,8 @@ where
local_reset_duration: config.reset_stream_duration, local_reset_duration: config.reset_stream_duration,
local_reset_max: config.reset_stream_max, local_reset_max: config.reset_stream_max,
remote_init_window_sz: DEFAULT_INITIAL_WINDOW_SIZE, remote_init_window_sz: DEFAULT_INITIAL_WINDOW_SIZE,
remote_max_initiated: config.settings remote_max_initiated: config
.settings
.max_concurrent_streams() .max_concurrent_streams()
.map(|max| max as usize), .map(|max| max as usize),
}); });
@@ -112,25 +113,24 @@ where
/// ///
/// Returns `RecvError` as this may raise errors that are caused by delayed /// Returns `RecvError` as this may raise errors that are caused by delayed
/// processing of received frames. /// processing of received frames.
fn poll_ready(&mut self) -> Poll<(), RecvError> { fn poll_ready(&mut self, cx: &mut Context) -> Poll<Result<(), RecvError>> {
// The order of these calls doesn't really matter too much // The order of these calls doesn't really matter too much
try_ready!(self.ping_pong.send_pending_pong(&mut self.codec)); ready!(self.ping_pong.send_pending_pong(cx, &mut self.codec))?;
try_ready!(self.ping_pong.send_pending_ping(&mut self.codec)); ready!(self.ping_pong.send_pending_ping(cx, &mut self.codec))?;
try_ready!( ready!(self
self.settings .settings
.send_pending_ack(&mut self.codec, &mut self.streams) .send_pending_ack(cx, &mut self.codec, &mut self.streams))?;
); ready!(self.streams.send_pending_refusal(cx, &mut self.codec))?;
try_ready!(self.streams.send_pending_refusal(&mut self.codec));
Ok(().into()) Poll::Ready(Ok(()))
} }
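The rewrite of `poll_ready` shows the conversion pattern used for the rest of the poll functions: the task `Context` is passed in explicitly, `ready!` replaces the readiness half of `try_ready!`, and `?` replaces its error half. A hand-expanded sketch of what `ready!(...)?` does (the `poll_step` helper is hypothetical, standing in for `send_pending_pong` and friends):

```rust
use std::io;
use std::task::{Context, Poll};

// Hypothetical sub-step; always ready here so the sketch stays self-contained.
fn poll_step(_cx: &mut Context<'_>) -> Poll<io::Result<()>> {
    Poll::Ready(Ok(()))
}

fn poll_ready(cx: &mut Context<'_>) -> Poll<io::Result<()>> {
    // Equivalent to `ready!(poll_step(cx))?;`
    match poll_step(cx) {
        Poll::Pending => return Poll::Pending, // `ready!` bails out on Pending
        Poll::Ready(result) => result?,        // `?` propagates the io::Error
    };
    Poll::Ready(Ok(()))
}
```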
/// Send any pending GOAWAY frames. /// Send any pending GOAWAY frames.
/// ///
/// This will return `Some(reason)` if the connection should be closed /// This will return `Some(reason)` if the connection should be closed
/// afterwards. If this is a graceful shutdown, this returns `None`. /// afterwards. If this is a graceful shutdown, this returns `None`.
fn poll_go_away(&mut self) -> Poll<Option<Reason>, io::Error> { fn poll_go_away(&mut self, cx: &mut Context) -> Poll<Option<io::Result<Reason>>> {
self.go_away.send_pending_go_away(&mut self.codec) self.go_away.send_pending_go_away(cx, &mut self.codec)
} }
fn go_away(&mut self, id: StreamId, e: Reason) { fn go_away(&mut self, id: StreamId, e: Reason) {
@@ -154,7 +154,7 @@ where
self.streams.recv_err(&proto::Error::Proto(e)); self.streams.recv_err(&proto::Error::Proto(e));
} }
fn take_error(&mut self, ours: Reason) -> Poll<(), proto::Error> { fn take_error(&mut self, ours: Reason) -> Poll<Result<(), proto::Error>> {
let reason = if let Some(theirs) = self.error.take() { let reason = if let Some(theirs) = self.error.take() {
match (ours, theirs) { match (ours, theirs) {
// If either side reported an error, return that // If either side reported an error, return that
@@ -171,9 +171,9 @@ where
}; };
if reason == Reason::NO_ERROR { if reason == Reason::NO_ERROR {
Ok(().into()) Poll::Ready(Ok(()))
} else { } else {
Err(proto::Error::Proto(reason)) Poll::Ready(Err(proto::Error::Proto(reason)))
} }
} }
@@ -192,7 +192,7 @@ where
} }
/// Advances the internal state of the connection. /// Advances the internal state of the connection.
pub fn poll(&mut self) -> Poll<(), proto::Error> { pub fn poll(&mut self, cx: &mut Context) -> Poll<Result<(), proto::Error>> {
use crate::codec::RecvError::*; use crate::codec::RecvError::*;
loop { loop {
@@ -200,15 +200,15 @@ where
match self.state { match self.state {
// When open, continue to poll a frame // When open, continue to poll a frame
State::Open => { State::Open => {
match self.poll2() { match self.poll2(cx) {
// The connection has shutdown normally // The connection has shutdown normally
Ok(Async::Ready(())) => self.state = State::Closing(Reason::NO_ERROR), Poll::Ready(Ok(())) => self.state = State::Closing(Reason::NO_ERROR),
// The connection is not ready to make progress // The connection is not ready to make progress
Ok(Async::NotReady) => { Poll::Pending => {
// Ensure all window updates have been sent. // Ensure all window updates have been sent.
// //
// This will also handle flushing `self.codec` // This will also handle flushing `self.codec`
try_ready!(self.streams.poll_complete(&mut self.codec)); ready!(self.streams.poll_complete(cx, &mut self.codec))?;
if self.error.is_some() || self.go_away.should_close_on_idle() { if self.error.is_some() || self.go_away.should_close_on_idle() {
if !self.streams.has_streams() { if !self.streams.has_streams() {
@@ -217,12 +217,12 @@ where
} }
} }
return Ok(Async::NotReady); return Poll::Pending;
}, }
// Attempting to read a frame resulted in a connection level // Attempting to read a frame resulted in a connection level
// error. This is handled by setting a GOAWAY frame followed by // error. This is handled by setting a GOAWAY frame followed by
// terminating the connection. // terminating the connection.
Err(Connection(e)) => { Poll::Ready(Err(Connection(e))) => {
log::debug!("Connection::poll; connection error={:?}", e); log::debug!("Connection::poll; connection error={:?}", e);
// We may have already sent a GOAWAY for this error, // We may have already sent a GOAWAY for this error,
@@ -238,22 +238,19 @@ where
// Reset all active streams // Reset all active streams
self.streams.recv_err(&e.into()); self.streams.recv_err(&e.into());
self.go_away_now(e); self.go_away_now(e);
}, }
// Attempting to read a frame resulted in a stream level error. // Attempting to read a frame resulted in a stream level error.
// This is handled by resetting the frame then trying to read // This is handled by resetting the frame then trying to read
// another frame. // another frame.
Err(Stream { Poll::Ready(Err(Stream { id, reason })) => {
id,
reason,
}) => {
log::trace!("stream error; id={:?}; reason={:?}", id, reason); log::trace!("stream error; id={:?}; reason={:?}", id, reason);
self.streams.send_reset(id, reason); self.streams.send_reset(id, reason);
}, }
// Attempting to read a frame resulted in an I/O error. All // Attempting to read a frame resulted in an I/O error. All
// active streams must be reset. // active streams must be reset.
// //
// TODO: Are I/O errors recoverable? // TODO: Are I/O errors recoverable?
Err(Io(e)) => { Poll::Ready(Err(Io(e))) => {
log::debug!("Connection::poll; IO error={:?}", e); log::debug!("Connection::poll; IO error={:?}", e);
let e = e.into(); let e = e.into();
@@ -261,24 +258,24 @@ where
self.streams.recv_err(&e); self.streams.recv_err(&e);
// Return the error // Return the error
return Err(e); return Poll::Ready(Err(e));
}, }
} }
} }
State::Closing(reason) => { State::Closing(reason) => {
log::trace!("connection closing after flush"); log::trace!("connection closing after flush");
// Flush/shutdown the codec // Flush/shutdown the codec
try_ready!(self.codec.shutdown()); ready!(self.codec.shutdown(cx))?;
// Transition the state to error // Transition the state to error
self.state = State::Closed(reason); self.state = State::Closed(reason);
}, }
State::Closed(reason) => return self.take_error(reason), State::Closed(reason) => return self.take_error(reason),
} }
} }
} }
fn poll2(&mut self) -> Poll<(), RecvError> { fn poll2(&mut self, cx: &mut Context) -> Poll<Result<(), RecvError>> {
use crate::frame::Frame::*; use crate::frame::Frame::*;
// This happens outside of the loop to prevent needing to do a clock // This happens outside of the loop to prevent needing to do a clock
@@ -292,43 +289,51 @@ where
// The order here matters: // The order here matters:
// - poll_go_away may buffer a graceful shutdown GOAWAY frame // - poll_go_away may buffer a graceful shutdown GOAWAY frame
// - If it has, we've also added a PING to be sent in poll_ready // - If it has, we've also added a PING to be sent in poll_ready
if let Some(reason) = try_ready!(self.poll_go_away()) { match ready!(self.poll_go_away(cx)) {
if self.go_away.should_close_now() { Some(Ok(reason)) => {
if self.go_away.is_user_initiated() { if self.go_away.should_close_now() {
// A user initiated abrupt shutdown shouldn't return if self.go_away.is_user_initiated() {
// the same error back to the user. // A user initiated abrupt shutdown shouldn't return
return Ok(Async::Ready(())); // the same error back to the user.
} else { return Poll::Ready(Ok(()));
return Err(RecvError::Connection(reason)); } else {
return Poll::Ready(Err(RecvError::Connection(reason)));
}
} }
// Only NO_ERROR should be waiting for idle
debug_assert_eq!(
reason,
Reason::NO_ERROR,
"graceful GOAWAY should be NO_ERROR"
);
} }
// Only NO_ERROR should be waiting for idle Some(Err(e)) => return Poll::Ready(Err(e.into())),
debug_assert_eq!(reason, Reason::NO_ERROR, "graceful GOAWAY should be NO_ERROR"); None => (),
} }
try_ready!(self.poll_ready()); ready!(self.poll_ready(cx))?;
match try_ready!(self.codec.poll()) { match ready!(Pin::new(&mut self.codec).poll_next(cx)) {
Some(Headers(frame)) => { Some(Ok(Headers(frame))) => {
log::trace!("recv HEADERS; frame={:?}", frame); log::trace!("recv HEADERS; frame={:?}", frame);
self.streams.recv_headers(frame)?; self.streams.recv_headers(frame)?;
}, }
Some(Data(frame)) => { Some(Ok(Data(frame))) => {
log::trace!("recv DATA; frame={:?}", frame); log::trace!("recv DATA; frame={:?}", frame);
self.streams.recv_data(frame)?; self.streams.recv_data(frame)?;
}, }
Some(Reset(frame)) => { Some(Ok(Reset(frame))) => {
log::trace!("recv RST_STREAM; frame={:?}", frame); log::trace!("recv RST_STREAM; frame={:?}", frame);
self.streams.recv_reset(frame)?; self.streams.recv_reset(frame)?;
}, }
Some(PushPromise(frame)) => { Some(Ok(PushPromise(frame))) => {
log::trace!("recv PUSH_PROMISE; frame={:?}", frame); log::trace!("recv PUSH_PROMISE; frame={:?}", frame);
self.streams.recv_push_promise(frame)?; self.streams.recv_push_promise(frame)?;
}, }
Some(Settings(frame)) => { Some(Ok(Settings(frame))) => {
log::trace!("recv SETTINGS; frame={:?}", frame); log::trace!("recv SETTINGS; frame={:?}", frame);
self.settings.recv_settings(frame); self.settings.recv_settings(frame);
}, }
Some(GoAway(frame)) => { Some(Ok(GoAway(frame))) => {
log::trace!("recv GOAWAY; frame={:?}", frame); log::trace!("recv GOAWAY; frame={:?}", frame);
// This should prevent starting new streams, // This should prevent starting new streams,
// but should allow continuing to process current streams // but should allow continuing to process current streams
@@ -336,8 +341,8 @@ where
// transition to GoAway. // transition to GoAway.
self.streams.recv_go_away(&frame)?; self.streams.recv_go_away(&frame)?;
self.error = Some(frame.reason()); self.error = Some(frame.reason());
}, }
Some(Ping(frame)) => { Some(Ok(Ping(frame))) => {
log::trace!("recv PING; frame={:?}", frame); log::trace!("recv PING; frame={:?}", frame);
let status = self.ping_pong.recv_ping(frame); let status = self.ping_pong.recv_ping(frame);
if status.is_shutdown() { if status.is_shutdown() {
@@ -349,21 +354,21 @@ where
let last_processed_id = self.streams.last_processed_id(); let last_processed_id = self.streams.last_processed_id();
self.go_away(last_processed_id, Reason::NO_ERROR); self.go_away(last_processed_id, Reason::NO_ERROR);
} }
}, }
Some(WindowUpdate(frame)) => { Some(Ok(WindowUpdate(frame))) => {
log::trace!("recv WINDOW_UPDATE; frame={:?}", frame); log::trace!("recv WINDOW_UPDATE; frame={:?}", frame);
self.streams.recv_window_update(frame)?; self.streams.recv_window_update(frame)?;
}, }
Some(Priority(frame)) => { Some(Ok(Priority(frame))) => {
log::trace!("recv PRIORITY; frame={:?}", frame); log::trace!("recv PRIORITY; frame={:?}", frame);
// TODO: handle // TODO: handle
}, }
Some(Err(e)) => return Poll::Ready(Err(e)),
None => { None => {
log::trace!("codec closed"); log::trace!("codec closed");
self.streams.recv_eof(false) self.streams.recv_eof(false).ok().expect("mutex poisoned");
.ok().expect("mutex poisoned"); return Poll::Ready(Ok(()));
return Ok(Async::Ready(())); }
},
} }
} }
} }
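`self.codec.poll()` becoming `Pin::new(&mut self.codec).poll_next(cx)` reflects the futures 0.3 `Stream` trait: polling takes a pinned receiver plus a `Context`, and each yielded item carries its own `Result`. A generic sketch of that call shape, assuming an `Unpin` stream (not specific to the codec):

```rust
use futures::stream::Stream;
use std::pin::Pin;
use std::task::{Context, Poll};

// Poll the next item out of any Unpin stream of results, the way the
// connection polls its codec above. Illustrative only.
fn poll_next_item<S, T, E>(
    stream: &mut S,
    cx: &mut Context<'_>,
) -> Poll<Option<Result<T, E>>>
where
    S: Stream<Item = Result<T, E>> + Unpin,
{
    Pin::new(stream).poll_next(cx)
}
```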
@@ -385,8 +390,9 @@ where
impl<T, B> Connection<T, server::Peer, B> impl<T, B> Connection<T, server::Peer, B>
where where
T: AsyncRead + AsyncWrite, T: AsyncRead + AsyncWrite + Unpin,
B: IntoBuf, B: IntoBuf + Unpin,
B::Buf: Unpin,
{ {
pub fn next_incoming(&mut self) -> Option<StreamRef<B::Buf>> { pub fn next_incoming(&mut self) -> Option<StreamRef<B::Buf>> {
self.streams.next_incoming() self.streams.next_incoming()

View File

@@ -2,8 +2,8 @@ use crate::codec::Codec;
use crate::frame::{self, Reason, StreamId}; use crate::frame::{self, Reason, StreamId};
use bytes::Buf; use bytes::Buf;
use futures::{Async, Poll};
use std::io; use std::io;
use std::task::{Context, Poll};
use tokio_io::AsyncWrite; use tokio_io::AsyncWrite;
/// Manages our sending of GOAWAY frames. /// Manages our sending of GOAWAY frames.
@@ -59,7 +59,7 @@ impl GoAway {
assert!( assert!(
f.last_stream_id() <= going_away.last_processed_id, f.last_stream_id() <= going_away.last_processed_id,
"GOAWAY stream IDs shouldn't be higher; \ "GOAWAY stream IDs shouldn't be higher; \
last_processed_id = {:?}, f.last_stream_id() = {:?}", last_processed_id = {:?}, f.last_stream_id() = {:?}",
going_away.last_processed_id, going_away.last_processed_id,
f.last_stream_id(), f.last_stream_id(),
); );
@@ -76,8 +76,8 @@ impl GoAway {
self.close_now = true; self.close_now = true;
if let Some(ref going_away) = self.going_away { if let Some(ref going_away) = self.going_away {
// Prevent sending the same GOAWAY twice. // Prevent sending the same GOAWAY twice.
if going_away.last_processed_id == f.last_stream_id() if going_away.last_processed_id == f.last_stream_id() && going_away.reason == f.reason()
&& going_away.reason == f.reason() { {
return; return;
} }
} }
@@ -100,9 +100,7 @@ impl GoAway {
/// Return the last Reason we've sent. /// Return the last Reason we've sent.
pub fn going_away_reason(&self) -> Option<Reason> { pub fn going_away_reason(&self) -> Option<Reason> {
self.going_away self.going_away.as_ref().map(|g| g.reason)
.as_ref()
.map(|g| g.reason)
} }
/// Returns if the connection should close now, or wait until idle. /// Returns if the connection should close now, or wait until idle.
@@ -112,36 +110,43 @@ impl GoAway {
/// Returns if the connection should be closed when idle. /// Returns if the connection should be closed when idle.
pub fn should_close_on_idle(&self) -> bool { pub fn should_close_on_idle(&self) -> bool {
!self.close_now && self.going_away !self.close_now
.as_ref() && self
.map(|g| g.last_processed_id != StreamId::MAX) .going_away
.unwrap_or(false) .as_ref()
.map(|g| g.last_processed_id != StreamId::MAX)
.unwrap_or(false)
} }
/// Try to write a pending GOAWAY frame to the buffer. /// Try to write a pending GOAWAY frame to the buffer.
/// ///
/// If a frame is written, the `Reason` of the GOAWAY is returned. /// If a frame is written, the `Reason` of the GOAWAY is returned.
pub fn send_pending_go_away<T, B>(&mut self, dst: &mut Codec<T, B>) -> Poll<Option<Reason>, io::Error> pub fn send_pending_go_away<T, B>(
&mut self,
cx: &mut Context,
dst: &mut Codec<T, B>,
) -> Poll<Option<io::Result<Reason>>>
where where
T: AsyncWrite, T: AsyncWrite + Unpin,
B: Buf, B: Buf + Unpin,
{ {
if let Some(frame) = self.pending.take() { if let Some(frame) = self.pending.take() {
if !dst.poll_ready()?.is_ready() { if !dst.poll_ready(cx)?.is_ready() {
self.pending = Some(frame); self.pending = Some(frame);
return Ok(Async::NotReady); return Poll::Pending;
} }
let reason = frame.reason(); let reason = frame.reason();
dst.buffer(frame.into()) dst.buffer(frame.into()).ok().expect("invalid GOAWAY frame");
.ok()
.expect("invalid GOAWAY frame");
return Ok(Async::Ready(Some(reason))); return Poll::Ready(Some(Ok(reason)));
} else if self.should_close_now() { } else if self.should_close_now() {
return Ok(Async::Ready(self.going_away_reason())); return match self.going_away_reason() {
Some(reason) => Poll::Ready(Some(Ok(reason))),
None => Poll::Ready(None),
};
} }
Ok(Async::Ready(None)) Poll::Ready(None)
} }
} }
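Because the item type is now `io::Result<Reason>` rather than a bare `Reason`, the old `Ok(Async::Ready(self.going_away_reason()))` needs the explicit `Option` match shown above. The same translation in isolation, with a hypothetical stand-in for `frame::Reason`:

```rust
use std::io;
use std::task::Poll;

#[derive(Debug, PartialEq)]
struct Reason(u32); // stand-in for frame::Reason

// Old shape: Poll<Option<Reason>, io::Error>
// New shape: Poll<Option<io::Result<Reason>>>
fn into_poll(reason: Option<Reason>) -> Poll<Option<io::Result<Reason>>> {
    match reason {
        Some(reason) => Poll::Ready(Some(Ok(reason))),
        None => Poll::Ready(None),
    }
}

fn main() {
    match into_poll(Some(Reason(0))) {
        Poll::Ready(Some(Ok(r))) => assert_eq!(r, Reason(0)),
        other => panic!("unexpected: {:?}", other),
    }
}
```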

View File

@@ -8,10 +8,10 @@ mod streams;
pub(crate) use self::connection::{Config, Connection}; pub(crate) use self::connection::{Config, Connection};
pub(crate) use self::error::Error; pub(crate) use self::error::Error;
pub(crate) use self::peer::{Peer, Dyn as DynPeer}; pub(crate) use self::peer::{Dyn as DynPeer, Peer};
pub(crate) use self::ping_pong::UserPings; pub(crate) use self::ping_pong::UserPings;
pub(crate) use self::streams::{StreamRef, OpaqueStreamRef, Streams}; pub(crate) use self::streams::{OpaqueStreamRef, StreamRef, Streams};
pub(crate) use self::streams::{PollReset, Prioritized, Open}; pub(crate) use self::streams::{Open, PollReset, Prioritized};
use crate::codec::Codec; use crate::codec::Codec;
@@ -21,9 +21,6 @@ use self::settings::Settings;
use crate::frame::{self, Frame}; use crate::frame::{self, Frame};
use futures::{task, Async, Poll};
use futures::task::Task;
use bytes::Buf; use bytes::Buf;
use tokio_io::AsyncWrite; use tokio_io::AsyncWrite;

View File

@@ -3,11 +3,11 @@ use crate::frame::Ping;
use crate::proto::{self, PingPayload}; use crate::proto::{self, PingPayload};
use bytes::Buf; use bytes::Buf;
use futures::{Async, Poll}; use futures::task::AtomicWaker;
use futures::task::AtomicTask;
use std::io; use std::io;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::task::{Context, Poll};
use tokio_io::AsyncWrite; use tokio_io::AsyncWrite;
/// Acknowledges ping requests from the remote. /// Acknowledges ping requests from the remote.
@@ -28,9 +28,9 @@ struct UserPingsRx(Arc<UserPingsInner>);
struct UserPingsInner { struct UserPingsInner {
state: AtomicUsize, state: AtomicUsize,
/// Task to wake up the main `Connection`. /// Task to wake up the main `Connection`.
ping_task: AtomicTask, ping_task: AtomicWaker,
/// Task to wake up `share::PingPong::poll_pong`. /// Task to wake up `share::PingPong::poll_pong`.
pong_task: AtomicTask, pong_task: AtomicWaker,
} }
#[derive(Debug)] #[derive(Debug)]
@@ -77,8 +77,8 @@ impl PingPong {
let user_pings = Arc::new(UserPingsInner { let user_pings = Arc::new(UserPingsInner {
state: AtomicUsize::new(USER_STATE_EMPTY), state: AtomicUsize::new(USER_STATE_EMPTY),
ping_task: AtomicTask::new(), ping_task: AtomicWaker::new(),
pong_task: AtomicTask::new(), pong_task: AtomicWaker::new(),
}); });
self.user_pings = Some(UserPingsRx(user_pings.clone())); self.user_pings = Some(UserPingsRx(user_pings.clone()));
Some(UserPings(user_pings)) Some(UserPings(user_pings))
@@ -135,34 +135,42 @@ impl PingPong {
} }
/// Send any pending pongs. /// Send any pending pongs.
pub(crate) fn send_pending_pong<T, B>(&mut self, dst: &mut Codec<T, B>) -> Poll<(), io::Error> pub(crate) fn send_pending_pong<T, B>(
&mut self,
cx: &mut Context,
dst: &mut Codec<T, B>,
) -> Poll<io::Result<()>>
where where
T: AsyncWrite, T: AsyncWrite + Unpin,
B: Buf, B: Buf + Unpin,
{ {
if let Some(pong) = self.pending_pong.take() { if let Some(pong) = self.pending_pong.take() {
if !dst.poll_ready()?.is_ready() { if !dst.poll_ready(cx)?.is_ready() {
self.pending_pong = Some(pong); self.pending_pong = Some(pong);
return Ok(Async::NotReady); return Poll::Pending;
} }
dst.buffer(Ping::pong(pong).into()) dst.buffer(Ping::pong(pong).into())
.expect("invalid pong frame"); .expect("invalid pong frame");
} }
Ok(Async::Ready(())) Poll::Ready(Ok(()))
} }
/// Send any pending pings. /// Send any pending pings.
pub(crate) fn send_pending_ping<T, B>(&mut self, dst: &mut Codec<T, B>) -> Poll<(), io::Error> pub(crate) fn send_pending_ping<T, B>(
&mut self,
cx: &mut Context,
dst: &mut Codec<T, B>,
) -> Poll<io::Result<()>>
where where
T: AsyncWrite, T: AsyncWrite + Unpin,
B: Buf, B: Buf + Unpin,
{ {
if let Some(ref mut ping) = self.pending_ping { if let Some(ref mut ping) = self.pending_ping {
if !ping.sent { if !ping.sent {
if !dst.poll_ready()?.is_ready() { if !dst.poll_ready(cx)?.is_ready() {
return Ok(Async::NotReady); return Poll::Pending;
} }
dst.buffer(Ping::new(ping.payload).into()) dst.buffer(Ping::new(ping.payload).into())
@@ -171,19 +179,22 @@ impl PingPong {
} }
} else if let Some(ref users) = self.user_pings { } else if let Some(ref users) = self.user_pings {
if users.0.state.load(Ordering::Acquire) == USER_STATE_PENDING_PING { if users.0.state.load(Ordering::Acquire) == USER_STATE_PENDING_PING {
if !dst.poll_ready()?.is_ready() { if !dst.poll_ready(cx)?.is_ready() {
return Ok(Async::NotReady); return Poll::Pending;
} }
dst.buffer(Ping::new(Ping::USER).into()) dst.buffer(Ping::new(Ping::USER).into())
.expect("invalid ping frame"); .expect("invalid ping frame");
users.0.state.store(USER_STATE_PENDING_PONG, Ordering::Release); users
.0
.state
.store(USER_STATE_PENDING_PONG, Ordering::Release);
} else { } else {
users.0.ping_task.register(); users.0.ping_task.register(cx.waker());
} }
} }
Ok(Async::Ready(())) Poll::Ready(Ok(()))
} }
} }
@@ -201,19 +212,17 @@ impl ReceivedPing {
impl UserPings { impl UserPings {
pub(crate) fn send_ping(&self) -> Result<(), Option<proto::Error>> { pub(crate) fn send_ping(&self) -> Result<(), Option<proto::Error>> {
let prev = self.0.state.compare_and_swap( let prev = self.0.state.compare_and_swap(
USER_STATE_EMPTY, // current USER_STATE_EMPTY, // current
USER_STATE_PENDING_PING, // new USER_STATE_PENDING_PING, // new
Ordering::AcqRel, Ordering::AcqRel,
); );
match prev { match prev {
USER_STATE_EMPTY => { USER_STATE_EMPTY => {
self.0.ping_task.notify(); self.0.ping_task.wake();
Ok(()) Ok(())
},
USER_STATE_CLOSED => {
Err(Some(broken_pipe().into()))
} }
USER_STATE_CLOSED => Err(Some(broken_pipe().into())),
_ => { _ => {
// Was already pending, user error! // Was already pending, user error!
Err(None) Err(None)
@@ -221,20 +230,20 @@ impl UserPings {
} }
} }
pub(crate) fn poll_pong(&self) -> Poll<(), proto::Error> { pub(crate) fn poll_pong(&self, cx: &mut Context) -> Poll<Result<(), proto::Error>> {
// Must register before checking state, in case state were to change // Must register before checking state, in case state were to change
// before we could register, and then the ping would just be lost. // before we could register, and then the ping would just be lost.
self.0.pong_task.register(); self.0.pong_task.register(cx.waker());
let prev = self.0.state.compare_and_swap( let prev = self.0.state.compare_and_swap(
USER_STATE_RECEIVED_PONG, // current USER_STATE_RECEIVED_PONG, // current
USER_STATE_EMPTY, // new USER_STATE_EMPTY, // new
Ordering::AcqRel, Ordering::AcqRel,
); );
match prev { match prev {
USER_STATE_RECEIVED_PONG => Ok(Async::Ready(())), USER_STATE_RECEIVED_PONG => Poll::Ready(Ok(())),
USER_STATE_CLOSED => Err(broken_pipe().into()), USER_STATE_CLOSED => Poll::Ready(Err(broken_pipe().into())),
_ => Ok(Async::NotReady), _ => Poll::Pending,
} }
} }
} }
@@ -244,13 +253,13 @@ impl UserPings {
impl UserPingsRx { impl UserPingsRx {
fn receive_pong(&self) -> bool { fn receive_pong(&self) -> bool {
let prev = self.0.state.compare_and_swap( let prev = self.0.state.compare_and_swap(
USER_STATE_PENDING_PONG, // current USER_STATE_PENDING_PONG, // current
USER_STATE_RECEIVED_PONG, // new USER_STATE_RECEIVED_PONG, // new
Ordering::AcqRel, Ordering::AcqRel,
); );
if prev == USER_STATE_PENDING_PONG { if prev == USER_STATE_PENDING_PONG {
self.0.pong_task.notify(); self.0.pong_task.wake();
true true
} else { } else {
false false
@@ -261,7 +270,7 @@ impl UserPingsRx {
impl Drop for UserPingsRx { impl Drop for UserPingsRx {
fn drop(&mut self) { fn drop(&mut self) {
self.0.state.store(USER_STATE_CLOSED, Ordering::Release); self.0.state.store(USER_STATE_CLOSED, Ordering::Release);
self.0.pong_task.notify(); self.0.pong_task.wake();
} }
} }
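`futures::task::AtomicTask` maps onto `futures::task::AtomicWaker`: `register()` now takes the waker out of the `Context`, and `notify()` becomes `wake()`. A minimal sketch of the same register-before-checking-state discipline that `poll_pong` relies on, using a hypothetical one-shot flag instead of the ping state machine:

```rust
use futures::task::AtomicWaker;
use std::sync::atomic::{AtomicBool, Ordering};
use std::task::{Context, Poll};

// Hypothetical one-shot flag; not part of h2.
struct Flag {
    ready: AtomicBool,
    waker: AtomicWaker,
}

impl Flag {
    fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<()> {
        // Register before checking, so a wake-up racing with the check
        // cannot be lost -- the same ordering noted in `poll_pong` above.
        self.waker.register(cx.waker());
        if self.ready.load(Ordering::Acquire) {
            Poll::Ready(())
        } else {
            Poll::Pending
        }
    }

    fn set_ready(&self) {
        self.ready.store(true, Ordering::Release);
        // Previously `self.task.notify()`.
        self.waker.wake();
    }
}
```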

View File

@@ -1,6 +1,7 @@
use crate::codec::RecvError; use crate::codec::RecvError;
use crate::frame; use crate::frame;
use crate::proto::*; use crate::proto::*;
use std::task::{Poll, Context};
#[derive(Debug)] #[derive(Debug)]
pub(crate) struct Settings { pub(crate) struct Settings {
@@ -29,21 +30,22 @@ impl Settings {
pub fn send_pending_ack<T, B, C, P>( pub fn send_pending_ack<T, B, C, P>(
&mut self, &mut self,
cx: &mut Context,
dst: &mut Codec<T, B>, dst: &mut Codec<T, B>,
streams: &mut Streams<C, P>, streams: &mut Streams<C, P>,
) -> Poll<(), RecvError> ) -> Poll<Result<(), RecvError>>
where where
T: AsyncWrite, T: AsyncWrite + Unpin,
B: Buf, B: Buf + Unpin,
C: Buf, C: Buf + Unpin,
P: Peer, P: Peer,
{ {
log::trace!("send_pending_ack; pending={:?}", self.pending); log::trace!("send_pending_ack; pending={:?}", self.pending);
if let Some(ref settings) = self.pending { if let Some(settings) = &self.pending {
if !dst.poll_ready()?.is_ready() { if !dst.poll_ready(cx)?.is_ready() {
log::trace!("failed to send ACK"); log::trace!("failed to send ACK");
return Ok(Async::NotReady); return Poll::Pending;
} }
// Create an ACK settings frame // Create an ACK settings frame
@@ -65,6 +67,6 @@ impl Settings {
self.pending = None; self.pending = None;
Ok(().into()) Poll::Ready(Ok(()))
} }
} }
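`send_pending_ack` keeps the same non-blocking shape as the other `send_pending_*` helpers: check the codec with `poll_ready(cx)`, and if it is not ready, keep the pending frame and return `Poll::Pending`. A self-contained sketch of that shape with a hypothetical sink in place of the codec:

```rust
use std::task::{Context, Poll};

// Hypothetical one-slot sink with the poll_ready / buffer split used above.
struct Sink {
    slot: Option<String>,
}

impl Sink {
    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), ()>> {
        if self.slot.is_none() {
            Poll::Ready(Ok(()))
        } else {
            Poll::Pending
        }
    }

    fn buffer(&mut self, frame: String) {
        self.slot = Some(frame);
    }
}

// If the sink is not ready, put the frame back and stay pending; otherwise
// buffer it and report completion.
fn send_pending(
    pending: &mut Option<String>,
    cx: &mut Context<'_>,
    dst: &mut Sink,
) -> Poll<Result<(), ()>> {
    if let Some(frame) = pending.take() {
        if !dst.poll_ready(cx)?.is_ready() {
            *pending = Some(frame);
            return Poll::Pending;
        }
        dst.buffer(frame);
    }
    Poll::Ready(Ok(()))
}
```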

View File

@@ -7,10 +7,10 @@ use crate::codec::UserError;
use crate::codec::UserError::*; use crate::codec::UserError::*;
use bytes::buf::Take; use bytes::buf::Take;
use futures::try_ready; use futures::ready;
use std::{cmp, fmt, mem}; use std::{cmp, fmt, mem};
use std::io; use std::io;
use std::task::{Context, Poll, Waker};
/// # Warning /// # Warning
/// ///
@@ -104,14 +104,14 @@ impl Prioritize {
frame: Frame<B>, frame: Frame<B>,
buffer: &mut Buffer<Frame<B>>, buffer: &mut Buffer<Frame<B>>,
stream: &mut store::Ptr, stream: &mut store::Ptr,
task: &mut Option<Task>, task: &mut Option<Waker>,
) { ) {
// Queue the frame in the buffer // Queue the frame in the buffer
stream.pending_send.push_back(buffer, frame); stream.pending_send.push_back(buffer, frame);
self.schedule_send(stream, task); self.schedule_send(stream, task);
} }
pub fn schedule_send(&mut self, stream: &mut store::Ptr, task: &mut Option<Task>) { pub fn schedule_send(&mut self, stream: &mut store::Ptr, task: &mut Option<Waker>) {
// If the stream is waiting to be opened, nothing more to do. // If the stream is waiting to be opened, nothing more to do.
if !stream.is_pending_open { if !stream.is_pending_open {
log::trace!("schedule_send; {:?}", stream.id); log::trace!("schedule_send; {:?}", stream.id);
@@ -120,7 +120,7 @@ impl Prioritize {
// Notify the connection. // Notify the connection.
if let Some(task) = task.take() { if let Some(task) = task.take() {
task.notify(); task.wake();
} }
} }
} }
@@ -136,7 +136,7 @@ impl Prioritize {
buffer: &mut Buffer<Frame<B>>, buffer: &mut Buffer<Frame<B>>,
stream: &mut store::Ptr, stream: &mut store::Ptr,
counts: &mut Counts, counts: &mut Counts,
task: &mut Option<Task>, task: &mut Option<Waker>,
) -> Result<(), UserError> ) -> Result<(), UserError>
where where
B: Buf, B: Buf,
@@ -483,17 +483,18 @@ impl Prioritize {
pub fn poll_complete<T, B>( pub fn poll_complete<T, B>(
&mut self, &mut self,
cx: &mut Context,
buffer: &mut Buffer<Frame<B>>, buffer: &mut Buffer<Frame<B>>,
store: &mut Store, store: &mut Store,
counts: &mut Counts, counts: &mut Counts,
dst: &mut Codec<T, Prioritized<B>>, dst: &mut Codec<T, Prioritized<B>>,
) -> Poll<(), io::Error> ) -> Poll<io::Result<()>>
where where
T: AsyncWrite, T: AsyncWrite + Unpin,
B: Buf, B: Buf + Unpin,
{ {
// Ensure codec is ready // Ensure codec is ready
try_ready!(dst.poll_ready()); ready!(dst.poll_ready(cx))?;
// Reclaim any frame that has previously been written // Reclaim any frame that has previously been written
self.reclaim_frame(buffer, store, dst); self.reclaim_frame(buffer, store, dst);
@@ -517,18 +518,18 @@ impl Prioritize {
dst.buffer(frame).ok().expect("invalid frame"); dst.buffer(frame).ok().expect("invalid frame");
// Ensure the codec is ready to try the loop again. // Ensure the codec is ready to try the loop again.
try_ready!(dst.poll_ready()); ready!(dst.poll_ready(cx))?;
// Because, always try to reclaim... // Because, always try to reclaim...
self.reclaim_frame(buffer, store, dst); self.reclaim_frame(buffer, store, dst);
}, },
None => { None => {
// Try to flush the codec. // Try to flush the codec.
try_ready!(dst.flush()); ready!(dst.flush(cx))?;
// This might release a data frame... // This might release a data frame...
if !self.reclaim_frame(buffer, store, dst) { if !self.reclaim_frame(buffer, store, dst) {
return Ok(().into()); return Poll::Ready(Ok(()))
} }
// No need to poll ready as poll_complete() does this for // No need to poll ready as poll_complete() does this for
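The `task: &mut Option<Task>` parameters become `&mut Option<Waker>`, and waking the parked connection changes from `notify()` to `wake()`. The deferred connection wake-up in isolation (illustrative only):

```rust
use std::task::Waker;

// Wake the parked connection task, if any. `Waker::wake` consumes the
// handle, which is why it is `take()`n out of the slot first;
// `wake_by_ref` is the non-consuming alternative when the waker must be kept.
fn notify_connection(task: &mut Option<Waker>) {
    if let Some(waker) = task.take() {
        waker.wake();
    }
}
```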

View File

@@ -1,13 +1,15 @@
use std::task::Context;
use super::*; use super::*;
use crate::{frame, proto}; use crate::{frame, proto};
use crate::codec::{RecvError, UserError}; use crate::codec::{RecvError, UserError};
use crate::frame::{Reason, DEFAULT_INITIAL_WINDOW_SIZE}; use crate::frame::{Reason, DEFAULT_INITIAL_WINDOW_SIZE};
use http::{HeaderMap, Response, Request, Method}; use http::{HeaderMap, Response, Request, Method};
use futures::try_ready; use futures::ready;
use std::io; use std::io;
use std::time::{Duration, Instant}; use std::time::{Duration, Instant};
use std::task::{Poll, Waker};
#[derive(Debug)] #[derive(Debug)]
pub(super) struct Recv { pub(super) struct Recv {
@@ -257,15 +259,17 @@ impl Recv {
/// Called by the client to get pushed response /// Called by the client to get pushed response
pub fn poll_pushed( pub fn poll_pushed(
&mut self, stream: &mut store::Ptr &mut self,
) -> Poll<Option<(Request<()>, store::Key)>, proto::Error> { cx: &Context,
stream: &mut store::Ptr
) -> Poll<Option<Result<(Request<()>, store::Key), proto::Error>>> {
use super::peer::PollMessage::*; use super::peer::PollMessage::*;
let mut ppp = stream.pending_push_promises.take(); let mut ppp = stream.pending_push_promises.take();
let pushed = ppp.pop(stream.store_mut()).map( let pushed = ppp.pop(stream.store_mut()).map(
|mut pushed| match pushed.pending_recv.pop_front(&mut self.buffer) { |mut pushed| match pushed.pending_recv.pop_front(&mut self.buffer) {
Some(Event::Headers(Server(headers))) => Some(Event::Headers(Server(headers))) =>
Async::Ready(Some((headers, pushed.key()))), (headers, pushed.key()),
// When frames are pushed into the queue, it is verified that // When frames are pushed into the queue, it is verified that
// the first frame is a HEADERS frame. // the first frame is a HEADERS frame.
_ => panic!("Headers not set on pushed stream") _ => panic!("Headers not set on pushed stream")
@@ -273,15 +277,15 @@ impl Recv {
); );
stream.pending_push_promises = ppp; stream.pending_push_promises = ppp;
if let Some(p) = pushed { if let Some(p) = pushed {
Ok(p) Poll::Ready(Some(Ok(p)))
} else { } else {
let is_open = stream.state.ensure_recv_open()?; let is_open = stream.state.ensure_recv_open()?;
if is_open { if is_open {
stream.recv_task = Some(task::current()); stream.recv_task = Some(cx.waker().clone());
Ok(Async::NotReady) Poll::Pending
} else { } else {
Ok(Async::Ready(None)) Poll::Ready(None)
} }
} }
} }
@@ -289,20 +293,21 @@ impl Recv {
/// Called by the client to get the response /// Called by the client to get the response
pub fn poll_response( pub fn poll_response(
&mut self, &mut self,
cx: &Context,
stream: &mut store::Ptr, stream: &mut store::Ptr,
) -> Poll<Response<()>, proto::Error> { ) -> Poll<Result<Response<()>, proto::Error>> {
use super::peer::PollMessage::*; use super::peer::PollMessage::*;
// If the buffer is not empty, then the first frame must be a HEADERS // If the buffer is not empty, then the first frame must be a HEADERS
// frame or the user violated the contract. // frame or the user violated the contract.
match stream.pending_recv.pop_front(&mut self.buffer) { match stream.pending_recv.pop_front(&mut self.buffer) {
Some(Event::Headers(Client(response))) => Ok(response.into()), Some(Event::Headers(Client(response))) => Poll::Ready(Ok(response.into())),
Some(_) => panic!("poll_response called after response returned"), Some(_) => panic!("poll_response called after response returned"),
None => { None => {
stream.state.ensure_recv_open()?; stream.state.ensure_recv_open()?;
stream.recv_task = Some(task::current()); stream.recv_task = Some(cx.waker().clone());
Ok(Async::NotReady) Poll::Pending
}, },
} }
} }
@@ -339,7 +344,7 @@ impl Recv {
pub fn release_connection_capacity( pub fn release_connection_capacity(
&mut self, &mut self,
capacity: WindowSize, capacity: WindowSize,
task: &mut Option<Task>, task: &mut Option<Waker>,
) { ) {
log::trace!( log::trace!(
"release_connection_capacity; size={}, connection in_flight_data={}", "release_connection_capacity; size={}, connection in_flight_data={}",
@@ -355,7 +360,7 @@ impl Recv {
if self.flow.unclaimed_capacity().is_some() { if self.flow.unclaimed_capacity().is_some() {
if let Some(task) = task.take() { if let Some(task) = task.take() {
task.notify(); task.wake();
} }
} }
} }
@@ -365,7 +370,7 @@ impl Recv {
&mut self, &mut self,
capacity: WindowSize, capacity: WindowSize,
stream: &mut store::Ptr, stream: &mut store::Ptr,
task: &mut Option<Task>, task: &mut Option<Waker>,
) -> Result<(), UserError> { ) -> Result<(), UserError> {
log::trace!("release_capacity; size={}", capacity); log::trace!("release_capacity; size={}", capacity);
@@ -387,7 +392,7 @@ impl Recv {
self.pending_window_updates.push(stream); self.pending_window_updates.push(stream);
if let Some(task) = task.take() { if let Some(task) = task.take() {
task.notify(); task.wake();
} }
} }
@@ -398,7 +403,7 @@ impl Recv {
pub fn release_closed_capacity( pub fn release_closed_capacity(
&mut self, &mut self,
stream: &mut store::Ptr, stream: &mut store::Ptr,
task: &mut Option<Task>, task: &mut Option<Waker>,
) { ) {
debug_assert_eq!(stream.ref_count, 0); debug_assert_eq!(stream.ref_count, 0);
@@ -433,7 +438,7 @@ impl Recv {
/// ///
/// The `task` is an optional parked task for the `Connection` that might /// The `task` is an optional parked task for the `Connection` that might
/// be blocked on needing more window capacity. /// be blocked on needing more window capacity.
pub fn set_target_connection_window(&mut self, target: WindowSize, task: &mut Option<Task>) { pub fn set_target_connection_window(&mut self, target: WindowSize, task: &mut Option<Waker>) {
log::trace!( log::trace!(
"set_target_connection_window; target={}; available={}, reserved={}", "set_target_connection_window; target={}; available={}, reserved={}",
target, target,
@@ -458,7 +463,7 @@ impl Recv {
// a connection WINDOW_UPDATE. // a connection WINDOW_UPDATE.
if self.flow.unclaimed_capacity().is_some() { if self.flow.unclaimed_capacity().is_some() {
if let Some(task) = task.take() { if let Some(task) = task.take() {
task.notify(); task.wake();
} }
} }
} }
@@ -824,14 +829,15 @@ impl Recv {
/// Send any pending refusals. /// Send any pending refusals.
pub fn send_pending_refusal<T, B>( pub fn send_pending_refusal<T, B>(
&mut self, &mut self,
cx: &mut Context,
dst: &mut Codec<T, Prioritized<B>>, dst: &mut Codec<T, Prioritized<B>>,
) -> Poll<(), io::Error> ) -> Poll<io::Result<()>>
where where
T: AsyncWrite, T: AsyncWrite + Unpin,
B: Buf, B: Buf + Unpin,
{ {
if let Some(stream_id) = self.refused { if let Some(stream_id) = self.refused {
try_ready!(dst.poll_ready()); ready!(dst.poll_ready(cx))?;
// Create the RST_STREAM frame // Create the RST_STREAM frame
let frame = frame::Reset::new(stream_id, Reason::REFUSED_STREAM); let frame = frame::Reset::new(stream_id, Reason::REFUSED_STREAM);
@@ -844,7 +850,7 @@ impl Recv {
self.refused = None; self.refused = None;
Ok(Async::Ready(())) Poll::Ready(Ok(()))
} }
pub fn clear_expired_reset_streams(&mut self, store: &mut Store, counts: &mut Counts) { pub fn clear_expired_reset_streams(&mut self, store: &mut Store, counts: &mut Counts) {
@@ -894,37 +900,39 @@ impl Recv {
pub fn poll_complete<T, B>( pub fn poll_complete<T, B>(
&mut self, &mut self,
cx: &mut Context,
store: &mut Store, store: &mut Store,
counts: &mut Counts, counts: &mut Counts,
dst: &mut Codec<T, Prioritized<B>>, dst: &mut Codec<T, Prioritized<B>>,
) -> Poll<(), io::Error> ) -> Poll<io::Result<()>>
where where
T: AsyncWrite, T: AsyncWrite + Unpin,
B: Buf, B: Buf + Unpin,
{ {
// Send any pending connection level window updates // Send any pending connection level window updates
try_ready!(self.send_connection_window_update(dst)); ready!(self.send_connection_window_update(cx, dst))?;
// Send any pending stream level window updates // Send any pending stream level window updates
try_ready!(self.send_stream_window_updates(store, counts, dst)); ready!(self.send_stream_window_updates(cx, store, counts, dst))?;
Ok(().into()) Poll::Ready(Ok(()))
} }
/// Send connection level window update /// Send connection level window update
fn send_connection_window_update<T, B>( fn send_connection_window_update<T, B>(
&mut self, &mut self,
cx: &mut Context,
dst: &mut Codec<T, Prioritized<B>>, dst: &mut Codec<T, Prioritized<B>>,
) -> Poll<(), io::Error> ) -> Poll<io::Result<()>>
where where
T: AsyncWrite, T: AsyncWrite + Unpin,
B: Buf, B: Buf + Unpin,
{ {
if let Some(incr) = self.flow.unclaimed_capacity() { if let Some(incr) = self.flow.unclaimed_capacity() {
let frame = frame::WindowUpdate::new(StreamId::zero(), incr); let frame = frame::WindowUpdate::new(StreamId::zero(), incr);
// Ensure the codec has capacity // Ensure the codec has capacity
try_ready!(dst.poll_ready()); ready!(dst.poll_ready(cx))?;
// Buffer the WINDOW_UPDATE frame // Buffer the WINDOW_UPDATE frame
dst.buffer(frame.into()) dst.buffer(frame.into())
@@ -938,28 +946,29 @@ impl Recv {
.expect("unexpected flow control state"); .expect("unexpected flow control state");
} }
Ok(().into()) Poll::Ready(Ok(()))
} }
/// Send stream level window update /// Send stream level window update
pub fn send_stream_window_updates<T, B>( pub fn send_stream_window_updates<T, B>(
&mut self, &mut self,
cx: &mut Context,
store: &mut Store, store: &mut Store,
counts: &mut Counts, counts: &mut Counts,
dst: &mut Codec<T, Prioritized<B>>, dst: &mut Codec<T, Prioritized<B>>,
) -> Poll<(), io::Error> ) -> Poll<io::Result<()>>
where where
T: AsyncWrite, T: AsyncWrite + Unpin,
B: Buf, B: Buf + Unpin,
{ {
loop { loop {
// Ensure the codec has capacity // Ensure the codec has capacity
try_ready!(dst.poll_ready()); ready!(dst.poll_ready(cx))?;
// Get the next stream // Get the next stream
let stream = match self.pending_window_updates.pop(store) { let stream = match self.pending_window_updates.pop(store) {
Some(stream) => stream, Some(stream) => stream,
None => return Ok(().into()), None => return Poll::Ready(Ok(())),
}; };
counts.transition(stream, |_, stream| { counts.transition(stream, |_, stream| {
@@ -1001,10 +1010,10 @@ impl Recv {
self.pending_accept.pop(store).map(|ptr| ptr.key()) self.pending_accept.pop(store).map(|ptr| ptr.key())
} }
pub fn poll_data(&mut self, stream: &mut Stream) -> Poll<Option<Bytes>, proto::Error> { pub fn poll_data(&mut self, cx: &Context, stream: &mut Stream) -> Poll<Option<Result<Bytes, proto::Error>>> {
// TODO: Return error when the stream is reset // TODO: Return error when the stream is reset
match stream.pending_recv.pop_front(&mut self.buffer) { match stream.pending_recv.pop_front(&mut self.buffer) {
Some(Event::Data(payload)) => Ok(Some(payload).into()), Some(Event::Data(payload)) => Poll::Ready(Some(Ok(payload))),
Some(event) => { Some(event) => {
// Frame is trailer // Frame is trailer
stream.pending_recv.push_front(&mut self.buffer, event); stream.pending_recv.push_front(&mut self.buffer, event);
@@ -1020,36 +1029,37 @@ impl Recv {
stream.notify_recv(); stream.notify_recv();
// No more data frames // No more data frames
Ok(None.into()) Poll::Ready(None)
}, },
None => self.schedule_recv(stream), None => self.schedule_recv(cx, stream),
} }
} }
pub fn poll_trailers( pub fn poll_trailers(
&mut self, &mut self,
cx: &Context,
stream: &mut Stream, stream: &mut Stream,
) -> Poll<Option<HeaderMap>, proto::Error> { ) -> Poll<Option<Result<HeaderMap, proto::Error>>> {
match stream.pending_recv.pop_front(&mut self.buffer) { match stream.pending_recv.pop_front(&mut self.buffer) {
Some(Event::Trailers(trailers)) => Ok(Some(trailers).into()), Some(Event::Trailers(trailers)) => Poll::Ready(Some(Ok(trailers))),
Some(event) => { Some(event) => {
// Frame is not trailers.. not ready to poll trailers yet. // Frame is not trailers.. not ready to poll trailers yet.
stream.pending_recv.push_front(&mut self.buffer, event); stream.pending_recv.push_front(&mut self.buffer, event);
Ok(Async::NotReady) Poll::Pending
}, },
None => self.schedule_recv(stream), None => self.schedule_recv(cx, stream),
} }
} }
fn schedule_recv<T>(&mut self, stream: &mut Stream) -> Poll<Option<T>, proto::Error> { fn schedule_recv<T>(&mut self, cx: &Context, stream: &mut Stream) -> Poll<Option<Result<T, proto::Error>>> {
if stream.state.ensure_recv_open()? { if stream.state.ensure_recv_open()? {
// Request to get notified once more frames arrive // Request to get notified once more frames arrive
stream.recv_task = Some(task::current()); stream.recv_task = Some(cx.waker().clone());
Ok(Async::NotReady) Poll::Pending
} else { } else {
// No more frames will be received // No more frames will be received
Ok(None.into()) Poll::Ready(None)
} }
} }
} }
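`task::current()` has no direct std equivalent; the waker now comes from the `Context` passed into the poll function and is cloned when it has to be parked for later, as in `schedule_recv` above. A self-contained sketch of that store-the-waker side (the `Inbox` type is hypothetical):

```rust
use std::task::{Context, Poll, Waker};

// Hypothetical single-item mailbox; not part of h2.
struct Inbox {
    item: Option<u32>,
    recv_task: Option<Waker>,
}

impl Inbox {
    fn poll_item(&mut self, cx: &mut Context<'_>) -> Poll<u32> {
        match self.item.take() {
            Some(v) => Poll::Ready(v),
            None => {
                // Ask to be woken once more data arrives, then report Pending.
                self.recv_task = Some(cx.waker().clone());
                Poll::Pending
            }
        }
    }
}
```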

View File

@@ -1,14 +1,13 @@
use super::{
store, Buffer, Codec, Config, Counts, Frame, Prioritize, Prioritized, Store, Stream, StreamId,
StreamIdOverflow, WindowSize,
};
use crate::codec::{RecvError, UserError}; use crate::codec::{RecvError, UserError};
use crate::frame::{self, Reason}; use crate::frame::{self, Reason};
use super::{
store, Buffer, Codec, Config, Counts, Frame, Prioritize,
Prioritized, Store, Stream, StreamId, StreamIdOverflow, WindowSize,
};
use bytes::Buf; use bytes::Buf;
use http; use http;
use futures::{Async, Poll}; use std::task::{Context, Poll, Waker};
use futures::task::Task;
use tokio_io::AsyncWrite; use tokio_io::AsyncWrite;
use std::io; use std::io;
@@ -60,7 +59,7 @@ impl Send {
buffer: &mut Buffer<Frame<B>>, buffer: &mut Buffer<Frame<B>>,
stream: &mut store::Ptr, stream: &mut store::Ptr,
counts: &mut Counts, counts: &mut Counts,
task: &mut Option<Task>, task: &mut Option<Waker>,
) -> Result<(), UserError> { ) -> Result<(), UserError> {
log::trace!( log::trace!(
"send_headers; frame={:?}; init_window={:?}", "send_headers; frame={:?}; init_window={:?}",
@@ -81,7 +80,6 @@ impl Send {
if te != "trailers" { if te != "trailers" {
log::debug!("illegal connection-specific headers found"); log::debug!("illegal connection-specific headers found");
return Err(UserError::MalformedHeaders); return Err(UserError::MalformedHeaders);
} }
} }
@@ -103,7 +101,8 @@ impl Send {
} }
// Queue the frame for sending // Queue the frame for sending
self.prioritize.queue_frame(frame.into(), buffer, stream, task); self.prioritize
.queue_frame(frame.into(), buffer, stream, task);
Ok(()) Ok(())
} }
@@ -115,7 +114,7 @@ impl Send {
buffer: &mut Buffer<Frame<B>>, buffer: &mut Buffer<Frame<B>>,
stream: &mut store::Ptr, stream: &mut store::Ptr,
counts: &mut Counts, counts: &mut Counts,
task: &mut Option<Task>, task: &mut Option<Waker>,
) { ) {
let is_reset = stream.state.is_reset(); let is_reset = stream.state.is_reset();
let is_closed = stream.state.is_closed(); let is_closed = stream.state.is_closed();
@@ -125,7 +124,7 @@ impl Send {
"send_reset(..., reason={:?}, stream={:?}, ..., \ "send_reset(..., reason={:?}, stream={:?}, ..., \
is_reset={:?}; is_closed={:?}; pending_send.is_empty={:?}; \ is_reset={:?}; is_closed={:?}; pending_send.is_empty={:?}; \
state={:?} \ state={:?} \
", ",
reason, reason,
stream.id, stream.id,
is_reset, is_reset,
@@ -151,7 +150,7 @@ impl Send {
if is_closed && is_empty { if is_closed && is_empty {
log::trace!( log::trace!(
" -> not sending explicit RST_STREAM ({:?} was closed \ " -> not sending explicit RST_STREAM ({:?} was closed \
and send queue was flushed)", and send queue was flushed)",
stream.id stream.id
); );
return; return;
@@ -166,7 +165,8 @@ impl Send {
let frame = frame::Reset::new(stream.id, reason); let frame = frame::Reset::new(stream.id, reason);
log::trace!("send_reset -- queueing; frame={:?}", frame); log::trace!("send_reset -- queueing; frame={:?}", frame);
self.prioritize.queue_frame(frame.into(), buffer, stream, task); self.prioritize
.queue_frame(frame.into(), buffer, stream, task);
self.prioritize.reclaim_all_capacity(stream, counts); self.prioritize.reclaim_all_capacity(stream, counts);
} }
@@ -175,7 +175,7 @@ impl Send {
stream: &mut store::Ptr, stream: &mut store::Ptr,
reason: Reason, reason: Reason,
counts: &mut Counts, counts: &mut Counts,
task: &mut Option<Task>, task: &mut Option<Waker>,
) { ) {
if stream.state.is_closed() { if stream.state.is_closed() {
// Stream is already closed, nothing more to do // Stream is already closed, nothing more to do
@@ -194,11 +194,13 @@ impl Send {
buffer: &mut Buffer<Frame<B>>, buffer: &mut Buffer<Frame<B>>,
stream: &mut store::Ptr, stream: &mut store::Ptr,
counts: &mut Counts, counts: &mut Counts,
task: &mut Option<Task>, task: &mut Option<Waker>,
) -> Result<(), UserError> ) -> Result<(), UserError>
where B: Buf, where
B: Buf,
{ {
self.prioritize.send_data(frame, buffer, stream, counts, task) self.prioritize
.send_data(frame, buffer, stream, counts, task)
} }
pub fn send_trailers<B>( pub fn send_trailers<B>(
@@ -207,7 +209,7 @@ impl Send {
buffer: &mut Buffer<Frame<B>>, buffer: &mut Buffer<Frame<B>>,
stream: &mut store::Ptr, stream: &mut store::Ptr,
counts: &mut Counts, counts: &mut Counts,
task: &mut Option<Task>, task: &mut Option<Waker>,
) -> Result<(), UserError> { ) -> Result<(), UserError> {
// TODO: Should this logic be moved into state.rs? // TODO: Should this logic be moved into state.rs?
if !stream.state.is_send_streaming() { if !stream.state.is_send_streaming() {
@@ -221,7 +223,8 @@ impl Send {
stream.state.send_close(); stream.state.send_close();
log::trace!("send_trailers -- queuing; frame={:?}", frame); log::trace!("send_trailers -- queuing; frame={:?}", frame);
self.prioritize.queue_frame(frame.into(), buffer, stream, task); self.prioritize
.queue_frame(frame.into(), buffer, stream, task);
// Release any excess capacity // Release any excess capacity
self.prioritize.reserve_capacity(0, stream, counts); self.prioritize.reserve_capacity(0, stream, counts);
@@ -231,15 +234,18 @@ impl Send {
pub fn poll_complete<T, B>( pub fn poll_complete<T, B>(
&mut self, &mut self,
cx: &mut Context,
buffer: &mut Buffer<Frame<B>>, buffer: &mut Buffer<Frame<B>>,
store: &mut Store, store: &mut Store,
counts: &mut Counts, counts: &mut Counts,
dst: &mut Codec<T, Prioritized<B>>, dst: &mut Codec<T, Prioritized<B>>,
) -> Poll<(), io::Error> ) -> Poll<io::Result<()>>
where T: AsyncWrite, where
B: Buf, T: AsyncWrite + Unpin,
B: Buf + Unpin,
{ {
self.prioritize.poll_complete(buffer, store, counts, dst) self.prioritize
.poll_complete(cx, buffer, store, counts, dst)
} }
/// Request capacity to send data /// Request capacity to send data
@@ -247,27 +253,28 @@ impl Send {
&mut self, &mut self,
capacity: WindowSize, capacity: WindowSize,
stream: &mut store::Ptr, stream: &mut store::Ptr,
counts: &mut Counts) counts: &mut Counts,
{ ) {
self.prioritize.reserve_capacity(capacity, stream, counts) self.prioritize.reserve_capacity(capacity, stream, counts)
} }
pub fn poll_capacity( pub fn poll_capacity(
&mut self, &mut self,
cx: &Context,
stream: &mut store::Ptr, stream: &mut store::Ptr,
) -> Poll<Option<WindowSize>, UserError> { ) -> Poll<Option<Result<WindowSize, UserError>>> {
if !stream.state.is_send_streaming() { if !stream.state.is_send_streaming() {
return Ok(Async::Ready(None)); return Poll::Ready(None);
} }
if !stream.send_capacity_inc { if !stream.send_capacity_inc {
stream.wait_send(); stream.wait_send(cx);
return Ok(Async::NotReady); return Poll::Pending;
} }
stream.send_capacity_inc = false; stream.send_capacity_inc = false;
Ok(Async::Ready(Some(self.capacity(stream)))) Poll::Ready(Some(Ok(self.capacity(stream))))
} }
/// Current available stream send capacity /// Current available stream send capacity
@@ -284,15 +291,16 @@ impl Send {
pub fn poll_reset( pub fn poll_reset(
&self, &self,
cx: &Context,
stream: &mut Stream, stream: &mut Stream,
mode: PollReset, mode: PollReset,
) -> Poll<Reason, crate::Error> { ) -> Poll<Result<Reason, crate::Error>> {
match stream.state.ensure_reason(mode)? { match stream.state.ensure_reason(mode)? {
Some(reason) => Ok(reason.into()), Some(reason) => Poll::Ready(Ok(reason)),
None => { None => {
stream.wait_send(); stream.wait_send(cx);
Ok(Async::NotReady) Poll::Pending
}, }
} }
} }
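One detail worth noting in `poll_capacity`/`poll_reset`: a plain `?` on an inner `Result` (for example `stream.state.ensure_reason(mode)?`) still works inside the new `Poll<Result<_, _>>`-returning functions, because that `Poll` type participates in the `?` operator and short-circuits to `Poll::Ready(Err(..))`. A sketch with hypothetical types:

```rust
use std::task::Poll;

#[derive(Debug)]
struct Error; // stand-in for crate::Error / UserError

fn ensure(flag: bool) -> Result<Option<u32>, Error> {
    if flag {
        Ok(Some(7))
    } else {
        Ok(None)
    }
}

// `?` on the inner Result short-circuits the whole poll with an error.
fn poll_value(flag: bool) -> Poll<Result<u32, Error>> {
    match ensure(flag)? {
        Some(v) => Poll::Ready(Ok(v)),
        None => Poll::Pending,
    }
}

fn main() {
    assert!(poll_value(true).is_ready());
    assert!(poll_value(false).is_pending());
}
```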
@@ -312,14 +320,18 @@ impl Send {
buffer: &mut Buffer<Frame<B>>, buffer: &mut Buffer<Frame<B>>,
stream: &mut store::Ptr, stream: &mut store::Ptr,
counts: &mut Counts, counts: &mut Counts,
task: &mut Option<Task>, task: &mut Option<Waker>,
) -> Result<(), Reason> { ) -> Result<(), Reason> {
if let Err(e) = self.prioritize.recv_stream_window_update(sz, stream) { if let Err(e) = self.prioritize.recv_stream_window_update(sz, stream) {
log::debug!("recv_stream_window_update !!; err={:?}", e); log::debug!("recv_stream_window_update !!; err={:?}", e);
self.send_reset( self.send_reset(
Reason::FLOW_CONTROL_ERROR.into(), Reason::FLOW_CONTROL_ERROR.into(),
buffer, stream, counts, task); buffer,
stream,
counts,
task,
);
return Err(e); return Err(e);
} }
@@ -344,7 +356,7 @@ impl Send {
buffer: &mut Buffer<Frame<B>>, buffer: &mut Buffer<Frame<B>>,
store: &mut Store, store: &mut Store,
counts: &mut Counts, counts: &mut Counts,
task: &mut Option<Task>, task: &mut Option<Waker>,
) -> Result<(), RecvError> { ) -> Result<(), RecvError> {
// Applies an update to the remote endpoint's initial window size. // Applies an update to the remote endpoint's initial window size.
// //
@@ -444,16 +456,14 @@ impl Send {
} }
pub fn ensure_next_stream_id(&self) -> Result<StreamId, UserError> { pub fn ensure_next_stream_id(&self) -> Result<StreamId, UserError> {
self.next_stream_id.map_err(|_| UserError::OverflowedStreamId) self.next_stream_id
.map_err(|_| UserError::OverflowedStreamId)
} }
pub fn may_have_created_stream(&self, id: StreamId) -> bool { pub fn may_have_created_stream(&self, id: StreamId) -> bool {
if let Ok(next_id) = self.next_stream_id { if let Ok(next_id) = self.next_stream_id {
// Peer::is_local_init should have been called beforehand // Peer::is_local_init should have been called beforehand
debug_assert_eq!( debug_assert_eq!(id.is_server_initiated(), next_id.is_server_initiated(),);
id.is_server_initiated(),
next_id.is_server_initiated(),
);
id < next_id id < next_id
} else { } else {
true true

View File

@@ -2,6 +2,7 @@ use super::*;
use std::time::Instant; use std::time::Instant;
use std::usize; use std::usize;
use std::task::{Context, Waker};
/// Tracks Stream related state /// Tracks Stream related state
/// ///
@@ -47,7 +48,7 @@ pub(super) struct Stream {
pub buffered_send_data: WindowSize, pub buffered_send_data: WindowSize,
/// Task tracking additional send capacity (i.e. window updates). /// Task tracking additional send capacity (i.e. window updates).
send_task: Option<task::Task>, send_task: Option<Waker>,
/// Frames pending for this stream being sent to the socket /// Frames pending for this stream being sent to the socket
pub pending_send: buffer::Deque, pub pending_send: buffer::Deque,
@@ -96,7 +97,7 @@ pub(super) struct Stream {
pub pending_recv: buffer::Deque, pub pending_recv: buffer::Deque,
/// Task tracking receiving frames /// Task tracking receiving frames
pub recv_task: Option<task::Task>, pub recv_task: Option<Waker>,
/// The stream's pending push promises /// The stream's pending push promises
pub pending_push_promises: store::Queue<NextAccept>, pub pending_push_promises: store::Queue<NextAccept>,
@@ -280,17 +281,17 @@ impl Stream {
pub fn notify_send(&mut self) { pub fn notify_send(&mut self) {
if let Some(task) = self.send_task.take() { if let Some(task) = self.send_task.take() {
task.notify(); task.wake();
} }
} }
pub fn wait_send(&mut self) { pub fn wait_send(&mut self, cx: &Context) {
self.send_task = Some(task::current()); self.send_task = Some(cx.waker().clone());
} }
pub fn notify_recv(&mut self) { pub fn notify_recv(&mut self) {
if let Some(task) = self.recv_task.take() { if let Some(task) = self.recv_task.take() {
task.notify(); task.wake();
} }
} }
} }

View File

@@ -1,18 +1,20 @@
use crate::{client, proto, server};
use crate::codec::{Codec, RecvError, SendError, UserError};
use crate::frame::{self, Frame, Reason};
use crate::proto::{peer, Peer, Open, WindowSize};
use super::{Buffer, Config, Counts, Prioritized, Recv, Send, Stream, StreamId};
use super::recv::RecvHeaderBlockError; use super::recv::RecvHeaderBlockError;
use super::store::{self, Entry, Resolve, Store}; use super::store::{self, Entry, Resolve, Store};
use super::{Buffer, Config, Counts, Prioritized, Recv, Send, Stream, StreamId};
use crate::codec::{Codec, RecvError, SendError, UserError};
use crate::frame::{self, Frame, Reason};
use crate::proto::{peer, Open, Peer, WindowSize};
use crate::{client, proto, server};
use bytes::{Buf, Bytes}; use bytes::{Buf, Bytes};
use futures::{task, Async, Poll, try_ready}; use futures::ready;
use http::{HeaderMap, Request, Response}; use http::{HeaderMap, Request, Response};
use std::task::{Context, Poll, Waker};
use tokio_io::AsyncWrite; use tokio_io::AsyncWrite;
use std::{fmt, io}; use crate::PollExt;
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use std::{fmt, io};
#[derive(Debug)] #[derive(Debug)]
pub(crate) struct Streams<B, P> pub(crate) struct Streams<B, P>
@@ -77,7 +79,7 @@ struct Actions {
send: Send, send: Send,
/// Task that calls `poll_complete`. /// Task that calls `poll_complete`.
task: Option<task::Task>, task: Option<Waker>,
/// If the connection errors, a copy is kept for any StreamRefs. /// If the connection errors, a copy is kept for any StreamRefs.
conn_error: Option<proto::Error>, conn_error: Option<proto::Error>,
@@ -93,7 +95,7 @@ struct SendBuffer<B> {
impl<B, P> Streams<B, P> impl<B, P> Streams<B, P>
where where
B: Buf, B: Buf + Unpin,
P: Peer, P: Peer,
{ {
pub fn new(config: Config) -> Self { pub fn new(config: Config) -> Self {
@@ -134,7 +136,11 @@ where
// The GOAWAY process has begun. All streams with a greater ID than // The GOAWAY process has begun. All streams with a greater ID than
// specified as part of GOAWAY should be ignored. // specified as part of GOAWAY should be ignored.
if id > me.actions.recv.max_stream_id() { if id > me.actions.recv.max_stream_id() {
log::trace!("id ({:?}) > max_stream_id ({:?}), ignoring HEADERS", id, me.actions.recv.max_stream_id()); log::trace!(
"id ({:?}) > max_stream_id ({:?}), ignoring HEADERS",
id,
me.actions.recv.max_stream_id()
);
return Ok(()); return Ok(());
} }
@@ -170,10 +176,10 @@ where
); );
e.insert(stream) e.insert(stream)
}, }
None => return Ok(()), None => return Ok(()),
} }
}, }
}; };
let stream = me.store.resolve(key); let stream = me.store.resolve(key);
@@ -254,15 +260,16 @@ where
// The GOAWAY process has begun. All streams with a greater ID // The GOAWAY process has begun. All streams with a greater ID
// than specified as part of GOAWAY should be ignored. // than specified as part of GOAWAY should be ignored.
if id > me.actions.recv.max_stream_id() { if id > me.actions.recv.max_stream_id() {
log::trace!("id ({:?}) > max_stream_id ({:?}), ignoring DATA", id, me.actions.recv.max_stream_id()); log::trace!(
"id ({:?}) > max_stream_id ({:?}), ignoring DATA",
id,
me.actions.recv.max_stream_id()
);
return Ok(()); return Ok(());
} }
if me.actions.may_have_forgotten_stream::<P>(id) { if me.actions.may_have_forgotten_stream::<P>(id) {
log::debug!( log::debug!("recv_data for old stream={:?}, sending STREAM_CLOSED", id,);
"recv_data for old stream={:?}, sending STREAM_CLOSED",
id,
);
let sz = frame.payload().len(); let sz = frame.payload().len();
// This should have been enforced at the codec::FramedRead layer, so // This should have been enforced at the codec::FramedRead layer, so
@@ -279,7 +286,7 @@ where
proto_err!(conn: "recv_data: stream not found; id={:?}", id); proto_err!(conn: "recv_data: stream not found; id={:?}", id);
return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); return Err(RecvError::Connection(Reason::PROTOCOL_ERROR));
}, }
}; };
let actions = &mut me.actions; let actions = &mut me.actions;
@@ -294,7 +301,9 @@ where
// we won't give the data to the user, and so they can't // we won't give the data to the user, and so they can't
// release the capacity. We do it automatically. // release the capacity. We do it automatically.
if let Err(RecvError::Stream { .. }) = res { if let Err(RecvError::Stream { .. }) = res {
actions.recv.release_connection_capacity(sz as WindowSize, &mut None); actions
.recv
.release_connection_capacity(sz as WindowSize, &mut None);
} }
actions.reset_on_recv_stream_err(send_buffer, stream, counts, res) actions.reset_on_recv_stream_err(send_buffer, stream, counts, res)
}) })
@@ -314,7 +323,11 @@ where
// The GOAWAY process has begun. All streams with a greater ID than // The GOAWAY process has begun. All streams with a greater ID than
// specified as part of GOAWAY should be ignored. // specified as part of GOAWAY should be ignored.
if id > me.actions.recv.max_stream_id() { if id > me.actions.recv.max_stream_id() {
log::trace!("id ({:?}) > max_stream_id ({:?}), ignoring RST_STREAM", id, me.actions.recv.max_stream_id()); log::trace!(
"id ({:?}) > max_stream_id ({:?}), ignoring RST_STREAM",
id,
me.actions.recv.max_stream_id()
);
return Ok(()); return Ok(());
} }
@@ -327,7 +340,7 @@ where
.map_err(RecvError::Connection)?; .map_err(RecvError::Connection)?;
return Ok(()); return Ok(());
}, }
}; };
let mut send_buffer = self.send_buffer.inner.lock().unwrap(); let mut send_buffer = self.send_buffer.inner.lock().unwrap();
@@ -400,14 +413,16 @@ where
actions.recv.go_away(last_stream_id); actions.recv.go_away(last_stream_id);
me.store me.store
.for_each(|stream| if stream.id > last_stream_id { .for_each(|stream| {
counts.transition(stream, |counts, stream| { if stream.id > last_stream_id {
actions.recv.recv_err(&err, &mut *stream); counts.transition(stream, |counts, stream| {
actions.send.recv_err(send_buffer, stream, counts); actions.recv.recv_err(&err, &mut *stream);
actions.send.recv_err(send_buffer, stream, counts);
Ok::<_, ()>(())
})
} else {
Ok::<_, ()>(()) Ok::<_, ()>(())
}) }
} else {
Ok::<_, ()>(())
}) })
.unwrap(); .unwrap();
@@ -470,7 +485,11 @@ where
// The GOAWAY process has begun. All streams with a greater ID // The GOAWAY process has begun. All streams with a greater ID
// than specified as part of GOAWAY should be ignored. // than specified as part of GOAWAY should be ignored.
if id > me.actions.recv.max_stream_id() { if id > me.actions.recv.max_stream_id() {
log::trace!("id ({:?}) > max_stream_id ({:?}), ignoring PUSH_PROMISE", id, me.actions.recv.max_stream_id()); log::trace!(
"id ({:?}) > max_stream_id ({:?}), ignoring PUSH_PROMISE",
id,
me.actions.recv.max_stream_id()
);
return Ok(()); return Ok(());
} }
@@ -480,8 +499,8 @@ where
} }
None => { None => {
proto_err!(conn: "recv_push_promise: initiating stream is in an invalid state"); proto_err!(conn: "recv_push_promise: initiating stream is in an invalid state");
return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)) return Err(RecvError::Connection(Reason::PROTOCOL_ERROR));
}, }
}; };
// TODO: Streams in the reserved states do not count towards the concurrency // TODO: Streams in the reserved states do not count towards the concurrency
@@ -495,7 +514,12 @@ where
// //
// If `None` is returned, then the stream is being refused. There is no // If `None` is returned, then the stream is being refused. There is no
// further work to be done. // further work to be done.
if me.actions.recv.open(promised_id, Open::PushPromise, &mut me.counts)?.is_none() { if me
.actions
.recv
.open(promised_id, Open::PushPromise, &mut me.counts)?
.is_none()
{
return Ok(()); return Ok(());
} }
@@ -507,21 +531,26 @@ where
Stream::new( Stream::new(
promised_id, promised_id,
me.actions.send.init_window_sz(), me.actions.send.init_window_sz(),
me.actions.recv.init_window_sz()) me.actions.recv.init_window_sz(),
)
}); });
let actions = &mut me.actions; let actions = &mut me.actions;
me.counts.transition(stream, |counts, stream| { me.counts.transition(stream, |counts, stream| {
let stream_valid = let stream_valid = actions.recv.recv_push_promise(frame, stream);
actions.recv.recv_push_promise(frame, stream);
match stream_valid { match stream_valid {
Ok(()) => Ok(()) => Ok(Some(stream.key())),
Ok(Some(stream.key())),
_ => { _ => {
let mut send_buffer = self.send_buffer.inner.lock().unwrap(); let mut send_buffer = self.send_buffer.inner.lock().unwrap();
actions.reset_on_recv_stream_err(&mut *send_buffer, stream, counts, stream_valid) actions
.reset_on_recv_stream_err(
&mut *send_buffer,
stream,
counts,
stream_valid,
)
.map(|()| None) .map(|()| None)
} }
} }
@@ -549,7 +578,11 @@ where
me.refs += 1; me.refs += 1;
key.map(|key| { key.map(|key| {
let stream = &mut me.store.resolve(key); let stream = &mut me.store.resolve(key);
log::trace!("next_incoming; id={:?}, state={:?}", stream.id, stream.state); log::trace!(
"next_incoming; id={:?}, state={:?}",
stream.id,
stream.state
);
StreamRef { StreamRef {
opaque: OpaqueStreamRef::new(self.inner.clone(), stream), opaque: OpaqueStreamRef::new(self.inner.clone(), stream),
send_buffer: self.send_buffer.clone(), send_buffer: self.send_buffer.clone(),
@@ -559,25 +592,33 @@ where
pub fn send_pending_refusal<T>( pub fn send_pending_refusal<T>(
&mut self, &mut self,
cx: &mut Context,
dst: &mut Codec<T, Prioritized<B>>, dst: &mut Codec<T, Prioritized<B>>,
) -> Poll<(), io::Error> ) -> Poll<io::Result<()>>
where where
T: AsyncWrite, T: AsyncWrite + Unpin,
B: Unpin,
{ {
let mut me = self.inner.lock().unwrap(); let mut me = self.inner.lock().unwrap();
let me = &mut *me; let me = &mut *me;
me.actions.recv.send_pending_refusal(dst) me.actions.recv.send_pending_refusal(cx, dst)
} }
pub fn clear_expired_reset_streams(&mut self) { pub fn clear_expired_reset_streams(&mut self) {
let mut me = self.inner.lock().unwrap(); let mut me = self.inner.lock().unwrap();
let me = &mut *me; let me = &mut *me;
me.actions.recv.clear_expired_reset_streams(&mut me.store, &mut me.counts); me.actions
.recv
.clear_expired_reset_streams(&mut me.store, &mut me.counts);
} }
pub fn poll_complete<T>(&mut self, dst: &mut Codec<T, Prioritized<B>>) -> Poll<(), io::Error> pub fn poll_complete<T>(
&mut self,
cx: &mut Context,
dst: &mut Codec<T, Prioritized<B>>,
) -> Poll<io::Result<()>>
where where
T: AsyncWrite, T: AsyncWrite + Unpin,
{ {
let mut me = self.inner.lock().unwrap(); let mut me = self.inner.lock().unwrap();
let me = &mut *me; let me = &mut *me;
@@ -589,20 +630,21 @@ where
// //
// TODO: It would probably be better to interleave updates w/ data // TODO: It would probably be better to interleave updates w/ data
// frames. // frames.
try_ready!(me.actions.recv.poll_complete(&mut me.store, &mut me.counts, dst)); ready!(me
.actions
.recv
.poll_complete(cx, &mut me.store, &mut me.counts, dst))?;
// Send any other pending frames // Send any other pending frames
try_ready!(me.actions.send.poll_complete( ready!(me
send_buffer, .actions
&mut me.store, .send
&mut me.counts, .poll_complete(cx, send_buffer, &mut me.store, &mut me.counts, dst))?;
dst
));
// Nothing else to do, track the task // Nothing else to do, track the task
me.actions.task = Some(task::current()); me.actions.task = Some(cx.waker().clone());
Ok(().into()) Poll::Ready(Ok(()))
} }
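The hunk above is the migration idiom repeated throughout this commit: `try_ready!` becomes `ready!(...)?`, and the parked `task::Task` becomes a `Waker` cloned from the `Context`. A minimal, self-contained sketch of that shape (the `poll_inner` helper here is hypothetical, not part of the crate):

use std::task::{Context, Poll, Waker};

// Hypothetical stand-in for the `recv`/`send` poll calls driven above.
fn poll_inner(_cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
    Poll::Ready(Ok(()))
}

// futures 0.1:  try_ready!(self.poll_inner());
//               self.task = Some(task::current());
//               Ok(().into())
fn poll_complete_sketch(cx: &mut Context<'_>, task: &mut Option<Waker>) -> Poll<std::io::Result<()>> {
    // `ready!` returns early on Pending; the trailing `?` propagates any error.
    futures::ready!(poll_inner(cx))?;
    // Track the task via its waker (replaces `task::current()` / `task.notify()`).
    *task = Some(cx.waker().clone());
    Poll::Ready(Ok(()))
}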
pub fn apply_remote_settings(&mut self, frame: &frame::Settings) -> Result<(), RecvError> { pub fn apply_remote_settings(&mut self, frame: &frame::Settings) -> Result<(), RecvError> {
@@ -615,7 +657,12 @@ where
me.counts.apply_remote_settings(frame); me.counts.apply_remote_settings(frame);
me.actions.send.apply_remote_settings( me.actions.send.apply_remote_settings(
frame, send_buffer, &mut me.store, &mut me.counts, &mut me.actions.task) frame,
send_buffer,
&mut me.store,
&mut me.counts,
&mut me.actions.task,
)
} }
pub fn send_request( pub fn send_request(
@@ -624,8 +671,8 @@ where
end_of_stream: bool, end_of_stream: bool,
pending: Option<&OpaqueStreamRef>, pending: Option<&OpaqueStreamRef>,
) -> Result<StreamRef<B>, SendError> { ) -> Result<StreamRef<B>, SendError> {
use http::Method;
use super::stream::ContentLength; use super::stream::ContentLength;
use http::Method;
// TODO: There is a hazard with assigning a stream ID before the // TODO: There is a hazard with assigning a stream ID before the
// prioritize layer. If prioritization reorders new streams, this // prioritize layer. If prioritization reorders new streams, this
@@ -671,8 +718,7 @@ where
} }
// Convert the message // Convert the message
let headers = client::Peer::convert_send_message( let headers = client::Peer::convert_send_message(stream_id, request, end_of_stream)?;
stream_id, request, end_of_stream)?;
let mut stream = me.store.insert(stream.id, stream); let mut stream = me.store.insert(stream.id, stream);
@@ -701,10 +747,7 @@ where
me.refs += 1; me.refs += 1;
Ok(StreamRef { Ok(StreamRef {
opaque: OpaqueStreamRef::new( opaque: OpaqueStreamRef::new(self.inner.clone(), &mut stream),
self.inner.clone(),
&mut stream,
),
send_buffer: self.send_buffer.clone(), send_buffer: self.send_buffer.clone(),
}) })
} }
@@ -719,13 +762,14 @@ where
let stream = Stream::new(id, 0, 0); let stream = Stream::new(id, 0, 0);
e.insert(stream) e.insert(stream)
}, }
}; };
let stream = me.store.resolve(key); let stream = me.store.resolve(key);
let mut send_buffer = self.send_buffer.inner.lock().unwrap(); let mut send_buffer = self.send_buffer.inner.lock().unwrap();
let send_buffer = &mut *send_buffer; let send_buffer = &mut *send_buffer;
me.actions.send_reset(stream, reason, &mut me.counts, send_buffer); me.actions
.send_reset(stream, reason, &mut me.counts, send_buffer);
} }
pub fn send_go_away(&mut self, last_processed_id: StreamId) { pub fn send_go_away(&mut self, last_processed_id: StreamId) {
@@ -740,7 +784,11 @@ impl<B> Streams<B, client::Peer>
where where
B: Buf, B: Buf,
{ {
pub fn poll_pending_open(&mut self, pending: Option<&OpaqueStreamRef>) -> Poll<(), crate::Error> { pub fn poll_pending_open(
&mut self,
cx: &Context,
pending: Option<&OpaqueStreamRef>,
) -> Poll<Result<(), crate::Error>> {
let mut me = self.inner.lock().unwrap(); let mut me = self.inner.lock().unwrap();
let me = &mut *me; let me = &mut *me;
@@ -751,11 +799,11 @@ where
let mut stream = me.store.resolve(pending.key); let mut stream = me.store.resolve(pending.key);
log::trace!("poll_pending_open; stream = {:?}", stream.is_pending_open); log::trace!("poll_pending_open; stream = {:?}", stream.is_pending_open);
if stream.is_pending_open { if stream.is_pending_open {
stream.wait_send(); stream.wait_send(cx);
return Ok(Async::NotReady); return Poll::Pending;
} }
} }
Ok(().into()) Poll::Ready(Ok(()))
} }
} }
@@ -845,7 +893,6 @@ where
} }
} }
// ===== impl StreamRef ===== // ===== impl StreamRef =====
impl<B> StreamRef<B> { impl<B> StreamRef<B> {
@@ -867,12 +914,9 @@ impl<B> StreamRef<B> {
frame.set_end_stream(end_stream); frame.set_end_stream(end_stream);
// Send the data frame // Send the data frame
actions.send.send_data( actions
frame, .send
send_buffer, .send_data(frame, send_buffer, stream, counts, &mut actions.task)
stream,
counts,
&mut actions.task)
}) })
} }
@@ -890,8 +934,9 @@ impl<B> StreamRef<B> {
let frame = frame::Headers::trailers(stream.id, trailers); let frame = frame::Headers::trailers(stream.id, trailers);
// Send the trailers frame // Send the trailers frame
actions.send.send_trailers( actions
frame, send_buffer, stream, counts, &mut actions.task) .send
.send_trailers(frame, send_buffer, stream, counts, &mut actions.task)
}) })
} }
@@ -903,7 +948,8 @@ impl<B> StreamRef<B> {
let mut send_buffer = self.send_buffer.inner.lock().unwrap(); let mut send_buffer = self.send_buffer.inner.lock().unwrap();
let send_buffer = &mut *send_buffer; let send_buffer = &mut *send_buffer;
me.actions.send_reset(stream, reason, &mut me.counts, send_buffer); me.actions
.send_reset(stream, reason, &mut me.counts, send_buffer);
} }
pub fn send_response( pub fn send_response(
@@ -922,8 +968,9 @@ impl<B> StreamRef<B> {
me.counts.transition(stream, |counts, stream| { me.counts.transition(stream, |counts, stream| {
let frame = server::Peer::convert_send_message(stream.id, response, end_of_stream); let frame = server::Peer::convert_send_message(stream.id, response, end_of_stream);
actions.send.send_headers( actions
frame, send_buffer, stream, counts, &mut actions.task) .send
.send_headers(frame, send_buffer, stream, counts, &mut actions.task)
}) })
} }
@@ -955,7 +1002,9 @@ impl<B> StreamRef<B> {
let mut stream = me.store.resolve(self.opaque.key); let mut stream = me.store.resolve(self.opaque.key);
me.actions.send.reserve_capacity(capacity, &mut stream, &mut me.counts) me.actions
.send
.reserve_capacity(capacity, &mut stream, &mut me.counts)
} }
/// Returns the stream's current send capacity. /// Returns the stream's current send capacity.
@@ -969,28 +1018,35 @@ impl<B> StreamRef<B> {
} }
/// Request to be notified when the stream's capacity increases /// Request to be notified when the stream's capacity increases
pub fn poll_capacity(&mut self) -> Poll<Option<WindowSize>, UserError> { pub fn poll_capacity(&mut self, cx: &Context) -> Poll<Option<Result<WindowSize, UserError>>> {
let mut me = self.opaque.inner.lock().unwrap(); let mut me = self.opaque.inner.lock().unwrap();
let me = &mut *me; let me = &mut *me;
let mut stream = me.store.resolve(self.opaque.key); let mut stream = me.store.resolve(self.opaque.key);
me.actions.send.poll_capacity(&mut stream) me.actions.send.poll_capacity(cx, &mut stream)
} }
/// Request to be notified if a `RST_STREAM` is received for this stream. /// Request to be notified if a `RST_STREAM` is received for this stream.
pub(crate) fn poll_reset(&mut self, mode: proto::PollReset) -> Poll<Reason, crate::Error> { pub(crate) fn poll_reset(
&mut self,
cx: &Context,
mode: proto::PollReset,
) -> Poll<Result<Reason, crate::Error>> {
let mut me = self.opaque.inner.lock().unwrap(); let mut me = self.opaque.inner.lock().unwrap();
let me = &mut *me; let me = &mut *me;
let mut stream = me.store.resolve(self.opaque.key); let mut stream = me.store.resolve(self.opaque.key);
me.actions.send.poll_reset(&mut stream, mode) me.actions
.send
.poll_reset(cx, &mut stream, mode)
.map_err(From::from) .map_err(From::from)
} }
pub fn clone_to_opaque(&self) -> OpaqueStreamRef pub fn clone_to_opaque(&self) -> OpaqueStreamRef
where B: 'static, where
B: 'static,
{ {
self.opaque.clone() self.opaque.clone()
} }
@@ -1015,35 +1071,37 @@ impl OpaqueStreamRef {
fn new(inner: Arc<Mutex<Inner>>, stream: &mut store::Ptr) -> OpaqueStreamRef { fn new(inner: Arc<Mutex<Inner>>, stream: &mut store::Ptr) -> OpaqueStreamRef {
stream.ref_inc(); stream.ref_inc();
OpaqueStreamRef { OpaqueStreamRef {
inner, key: stream.key() inner,
key: stream.key(),
} }
} }
/// Called by a client to check for a received response. /// Called by a client to check for a received response.
pub fn poll_response(&mut self) -> Poll<Response<()>, proto::Error> { pub fn poll_response(&mut self, cx: &Context) -> Poll<Result<Response<()>, proto::Error>> {
let mut me = self.inner.lock().unwrap(); let mut me = self.inner.lock().unwrap();
let me = &mut *me; let me = &mut *me;
let mut stream = me.store.resolve(self.key); let mut stream = me.store.resolve(self.key);
me.actions.recv.poll_response(&mut stream) me.actions.recv.poll_response(cx, &mut stream)
} }
/// Called by a client to check for a pushed request. /// Called by a client to check for a pushed request.
pub fn poll_pushed( pub fn poll_pushed(
&mut self &mut self,
) -> Poll<Option<(Request<()>, OpaqueStreamRef)>, proto::Error> { cx: &Context,
) -> Poll<Option<Result<(Request<()>, OpaqueStreamRef), proto::Error>>> {
let mut me = self.inner.lock().unwrap(); let mut me = self.inner.lock().unwrap();
let me = &mut *me; let me = &mut *me;
let res = { let mut stream = me.store.resolve(self.key);
let mut stream = me.store.resolve(self.key); me.actions
try_ready!(me.actions.recv.poll_pushed(&mut stream)) .recv
}; .poll_pushed(cx, &mut stream)
Ok(Async::Ready(res.map(|(h, key)| { .map_ok_(|(h, key)| {
me.refs += 1; me.refs += 1;
let opaque_ref = let opaque_ref =
OpaqueStreamRef::new(self.inner.clone(), &mut me.store.resolve(key)); OpaqueStreamRef::new(self.inner.clone(), &mut me.store.resolve(key));
(h, opaque_ref) (h, opaque_ref)
}))) })
} }
pub fn body_is_empty(&self) -> bool { pub fn body_is_empty(&self) -> bool {
@@ -1064,22 +1122,22 @@ impl OpaqueStreamRef {
me.actions.recv.is_end_stream(&stream) me.actions.recv.is_end_stream(&stream)
} }
pub fn poll_data(&mut self) -> Poll<Option<Bytes>, proto::Error> { pub fn poll_data(&mut self, cx: &Context) -> Poll<Option<Result<Bytes, proto::Error>>> {
let mut me = self.inner.lock().unwrap(); let mut me = self.inner.lock().unwrap();
let me = &mut *me; let me = &mut *me;
let mut stream = me.store.resolve(self.key); let mut stream = me.store.resolve(self.key);
me.actions.recv.poll_data(&mut stream) me.actions.recv.poll_data(cx, &mut stream)
} }
pub fn poll_trailers(&mut self) -> Poll<Option<HeaderMap>, proto::Error> { pub fn poll_trailers(&mut self, cx: &Context) -> Poll<Option<Result<HeaderMap, proto::Error>>> {
let mut me = self.inner.lock().unwrap(); let mut me = self.inner.lock().unwrap();
let me = &mut *me; let me = &mut *me;
let mut stream = me.store.resolve(self.key); let mut stream = me.store.resolve(self.key);
me.actions.recv.poll_trailers(&mut stream) me.actions.recv.poll_trailers(cx, &mut stream)
} }
/// Releases recv capacity back to the peer. This may result in sending /// Releases recv capacity back to the peer. This may result in sending
@@ -1101,16 +1159,11 @@ impl OpaqueStreamRef {
let mut stream = me.store.resolve(self.key); let mut stream = me.store.resolve(self.key);
me.actions me.actions.recv.clear_recv_buffer(&mut stream);
.recv
.clear_recv_buffer(&mut stream);
} }
pub fn stream_id(&self) -> StreamId { pub fn stream_id(&self) -> StreamId {
self.inner.lock() self.inner.lock().unwrap().store[self.key].id
.unwrap()
.store[self.key]
.id
} }
} }
@@ -1125,17 +1178,15 @@ impl fmt::Debug for OpaqueStreamRef {
.field("stream_id", &stream.id) .field("stream_id", &stream.id)
.field("ref_count", &stream.ref_count) .field("ref_count", &stream.ref_count)
.finish() .finish()
},
Err(Poisoned(_)) => {
fmt.debug_struct("OpaqueStreamRef")
.field("inner", &"<Poisoned>")
.finish()
}
Err(WouldBlock) => {
fmt.debug_struct("OpaqueStreamRef")
.field("inner", &"<Locked>")
.finish()
} }
Err(Poisoned(_)) => fmt
.debug_struct("OpaqueStreamRef")
.field("inner", &"<Poisoned>")
.finish(),
Err(WouldBlock) => fmt
.debug_struct("OpaqueStreamRef")
.field("inner", &"<Locked>")
.finish(),
} }
} }
} }
@@ -1164,12 +1215,14 @@ impl Drop for OpaqueStreamRef {
fn drop_stream_ref(inner: &Mutex<Inner>, key: store::Key) { fn drop_stream_ref(inner: &Mutex<Inner>, key: store::Key) {
let mut me = match inner.lock() { let mut me = match inner.lock() {
Ok(inner) => inner, Ok(inner) => inner,
Err(_) => if ::std::thread::panicking() { Err(_) => {
log::trace!("StreamRef::drop; mutex poisoned"); if ::std::thread::panicking() {
return; log::trace!("StreamRef::drop; mutex poisoned");
} else { return;
panic!("StreamRef::drop; mutex poisoned"); } else {
}, panic!("StreamRef::drop; mutex poisoned");
}
}
}; };
let me = &mut *me; let me = &mut *me;
@@ -1189,19 +1242,19 @@ fn drop_stream_ref(inner: &Mutex<Inner>, key: store::Key) {
// (connection) so that it can close properly // (connection) so that it can close properly
if stream.ref_count == 0 && stream.is_closed() { if stream.ref_count == 0 && stream.is_closed() {
if let Some(task) = actions.task.take() { if let Some(task) = actions.task.take() {
task.notify(); task.wake();
} }
} }
me.counts.transition(stream, |counts, stream| { me.counts.transition(stream, |counts, stream| {
maybe_cancel(stream, actions, counts); maybe_cancel(stream, actions, counts);
if stream.ref_count == 0 { if stream.ref_count == 0 {
// Release any recv window back to connection, no one can access // Release any recv window back to connection, no one can access
// it anymore. // it anymore.
actions.recv.release_closed_capacity(stream, &mut actions.task); actions
.recv
.release_closed_capacity(stream, &mut actions.task);
// We won't be able to reach our push promises anymore // We won't be able to reach our push promises anymore
let mut ppp = stream.pending_push_promises.take(); let mut ppp = stream.pending_push_promises.take();
@@ -1216,11 +1269,9 @@ fn drop_stream_ref(inner: &Mutex<Inner>, key: store::Key) {
fn maybe_cancel(stream: &mut store::Ptr, actions: &mut Actions, counts: &mut Counts) { fn maybe_cancel(stream: &mut store::Ptr, actions: &mut Actions, counts: &mut Counts) {
if stream.is_canceled_interest() { if stream.is_canceled_interest() {
actions.send.schedule_implicit_reset( actions
stream, .send
Reason::CANCEL, .schedule_implicit_reset(stream, Reason::CANCEL, counts, &mut actions.task);
counts,
&mut actions.task);
actions.recv.enqueue_reset_expiration(stream, counts); actions.recv.enqueue_reset_expiration(stream, counts);
} }
} }
@@ -1245,8 +1296,8 @@ impl Actions {
send_buffer: &mut Buffer<Frame<B>>, send_buffer: &mut Buffer<Frame<B>>,
) { ) {
counts.transition(stream, |counts, stream| { counts.transition(stream, |counts, stream| {
self.send.send_reset( self.send
reason, send_buffer, stream, counts, &mut self.task); .send_reset(reason, send_buffer, stream, counts, &mut self.task);
self.recv.enqueue_reset_expiration(stream, counts); self.recv.enqueue_reset_expiration(stream, counts);
// if a RecvStream is parked, ensure it's notified // if a RecvStream is parked, ensure it's notified
stream.notify_recv(); stream.notify_recv();
@@ -1260,12 +1311,10 @@ impl Actions {
counts: &mut Counts, counts: &mut Counts,
res: Result<(), RecvError>, res: Result<(), RecvError>,
) -> Result<(), RecvError> { ) -> Result<(), RecvError> {
if let Err(RecvError::Stream { if let Err(RecvError::Stream { reason, .. }) = res {
reason, ..
}) = res
{
// Reset the stream. // Reset the stream.
self.send.send_reset(reason, buffer, stream, counts, &mut self.task); self.send
.send_reset(reason, buffer, stream, counts, &mut self.task);
Ok(()) Ok(())
} else { } else {
res res
@@ -1308,11 +1357,7 @@ impl Actions {
} }
} }
fn clear_queues(&mut self, fn clear_queues(&mut self, clear_pending_accept: bool, store: &mut Store, counts: &mut Counts) {
clear_pending_accept: bool,
store: &mut Store,
counts: &mut Counts)
{
self.recv.clear_queues(clear_pending_accept, store, counts); self.recv.clear_queues(clear_pending_accept, store, counts);
self.send.clear_queues(store, counts); self.send.clear_queues(store, counts);
} }

View File

@@ -64,50 +64,45 @@
//! will use the HTTP/2.0 protocol without prior negotiation. //! will use the HTTP/2.0 protocol without prior negotiation.
//! //!
//! ```rust //! ```rust
//! use futures::{Future, Stream}; //! #![feature(async_await)]
//! # use futures::future::ok; //! use futures::StreamExt;
//! use h2::server; //! use h2::server;
//! use http::{Response, StatusCode}; //! use http::{Response, StatusCode};
//! use tokio::net::TcpListener; //! use tokio::net::TcpListener;
//! //!
//! pub fn main () { //! #[tokio::main]
//! pub async fn main () {
//! let addr = "127.0.0.1:5928".parse().unwrap(); //! let addr = "127.0.0.1:5928".parse().unwrap();
//! let listener = TcpListener::bind(&addr,).unwrap(); //! let listener = TcpListener::bind(&addr,).unwrap();
//! //!
//! tokio::run({ //! // Accept all incoming TCP connections.
//! // Accept all incoming TCP connections. //! let mut incoming = listener.incoming();
//! listener.incoming().for_each(move |socket| { //! # futures::future::select(Box::pin(async {
//! // Spawn a new task to process each connection. //! while let Some(socket) = incoming.next().await {
//! tokio::spawn({ //! // Spawn a new task to process each connection.
//! // Start the HTTP/2.0 connection handshake //! tokio::spawn(async {
//! server::handshake(socket) //! // Start the HTTP/2.0 connection handshake
//! .and_then(|h2| { //! let mut h2 = server::handshake(socket.unwrap()).await.unwrap();
//! // Accept all inbound HTTP/2.0 streams sent over the //! // Accept all inbound HTTP/2.0 streams sent over the
//! // connection. //! // connection.
//! h2.for_each(|(request, mut respond)| { //! while let Some(request) = h2.next().await {
//! println!("Received request: {:?}", request); //! let (request, mut respond) = request.unwrap();
//! println!("Received request: {:?}", request);
//! //!
//! // Build a response with no body //! // Build a response with no body
//! let response = Response::builder() //! let response = Response::builder()
//! .status(StatusCode::OK) //! .status(StatusCode::OK)
//! .body(()) //! .body(())
//! .unwrap(); //! .unwrap();
//! //!
//! // Send the response back to the client //! // Send the response back to the client
//! respond.send_response(response, true) //! respond.send_response(response, true)
//! .unwrap(); //! .unwrap();
//! }
//! //!
//! Ok(()) //! });
//! }) //! }
//! }) //! # }), Box::pin(async {})).await;
//! .map_err(|e| panic!("unexpected error = {:?}", e))
//! });
//!
//! Ok(())
//! })
//! .map_err(|e| panic!("failed to run HTTP/2.0 server: {:?}", e))
//! # .select(ok(())).map(|_|()).map_err(|_|())
//! });
//! } //! }
//! ``` //! ```
//! //!
@@ -124,17 +119,20 @@
//! [`SendStream`]: ../struct.SendStream.html //! [`SendStream`]: ../struct.SendStream.html
//! [`TcpListener`]: https://docs.rs/tokio-core/0.1/tokio_core/net/struct.TcpListener.html //! [`TcpListener`]: https://docs.rs/tokio-core/0.1/tokio_core/net/struct.TcpListener.html
use crate::{SendStream, RecvStream, ReleaseCapacity, PingPong};
use crate::codec::{Codec, RecvError}; use crate::codec::{Codec, RecvError};
use crate::frame::{self, Pseudo, Reason, Settings, StreamId}; use crate::frame::{self, Pseudo, Reason, Settings, StreamId};
use crate::proto::{self, Config, Prioritized}; use crate::proto::{self, Config, Prioritized};
use crate::{PingPong, RecvStream, ReleaseCapacity, SendStream};
use bytes::{Buf, Bytes, IntoBuf}; use bytes::{Buf, Bytes, IntoBuf};
use futures::{self, Async, Future, Poll, try_ready}; use futures::ready;
use http::{HeaderMap, Request, Response}; use http::{HeaderMap, Request, Response};
use std::{convert, fmt, io, mem}; use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration; use std::time::Duration;
use tokio_io::{AsyncRead, AsyncWrite, try_nb}; use std::{convert, fmt, io, mem};
use tokio_io::{AsyncRead, AsyncWrite};
/// In progress HTTP/2.0 connection handshake future. /// In progress HTTP/2.0 connection handshake future.
/// ///
@@ -155,7 +153,7 @@ pub struct Handshake<T, B: IntoBuf = Bytes> {
/// The config to pass to Connection::new after handshake succeeds. /// The config to pass to Connection::new after handshake succeeds.
builder: Builder, builder: Builder,
/// The current state of the handshake. /// The current state of the handshake.
state: Handshaking<T, B> state: Handshaking<T, B>,
} }
/// Accepts inbound HTTP/2.0 streams on a connection. /// Accepts inbound HTTP/2.0 streams on a connection.
@@ -179,21 +177,19 @@ pub struct Handshake<T, B: IntoBuf = Bytes> {
/// # Examples /// # Examples
/// ///
/// ``` /// ```
/// # use futures::{Future, Stream}; /// #![feature(async_await)]
/// # use futures::StreamExt;
/// # use tokio_io::*; /// # use tokio_io::*;
/// # use h2::server; /// # use h2::server;
/// # use h2::server::*; /// # use h2::server::*;
/// # /// #
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) { /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T) {
/// server::handshake(my_io) /// let mut server = server::handshake(my_io).await.unwrap();
/// .and_then(|server| { /// while let Some(request) = server.next().await {
/// server.for_each(|(request, respond)| { /// let (request, respond) = request.unwrap();
/// // Process the request and send the response back to the client /// // Process the request and send the response back to the client
/// // using `respond`. /// // using `respond`.
/// # Ok(()) /// }
/// })
/// })
/// # .wait().unwrap();
/// # } /// # }
/// # /// #
/// # pub fn main() {} /// # pub fn main() {}
@@ -224,7 +220,7 @@ pub struct Connection<T, B: IntoBuf> {
/// # use tokio_io::*; /// # use tokio_io::*;
/// # use h2::server::*; /// # use h2::server::*;
/// # /// #
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Handshake<T> /// # -> Handshake<T>
/// # { /// # {
/// // `server_fut` is a future representing the completion of the HTTP/2.0 /// // `server_fut` is a future representing the completion of the HTTP/2.0
@@ -318,26 +314,23 @@ const PREFACE: [u8; 24] = *b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
/// # Examples /// # Examples
/// ///
/// ``` /// ```
/// #![feature(async_await)]
/// # use tokio_io::*; /// # use tokio_io::*;
/// # use futures::*;
/// # use h2::server; /// # use h2::server;
/// # use h2::server::*; /// # use h2::server::*;
/// # /// #
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # { /// # {
/// server::handshake(my_io) /// let connection = server::handshake(my_io).await.unwrap();
/// .and_then(|connection| { /// // The HTTP/2.0 handshake has completed, now use `connection` to
/// // The HTTP/2.0 handshake has completed, now use `connection` to /// // accept inbound HTTP/2.0 streams.
/// // accept inbound HTTP/2.0 streams.
/// # Ok(())
/// })
/// # .wait().unwrap();
/// # } /// # }
/// # /// #
/// # pub fn main() {} /// # pub fn main() {}
/// ``` /// ```
pub fn handshake<T>(io: T) -> Handshake<T, Bytes> pub fn handshake<T>(io: T) -> Handshake<T, Bytes>
where T: AsyncRead + AsyncWrite, where
T: AsyncRead + AsyncWrite + Unpin,
{ {
Builder::new().handshake(io) Builder::new().handshake(io)
} }
@@ -346,8 +339,9 @@ where T: AsyncRead + AsyncWrite,
impl<T, B> Connection<T, B> impl<T, B> Connection<T, B>
where where
T: AsyncRead + AsyncWrite, T: AsyncRead + AsyncWrite + Unpin,
B: IntoBuf, B: IntoBuf + Unpin,
B::Buf: Unpin,
{ {
fn handshake2(io: T, builder: Builder) -> Handshake<T, B> { fn handshake2(io: T, builder: Builder) -> Handshake<T, B> {
// Create the codec. // Create the codec.
@@ -407,11 +401,14 @@ where
/// [`poll`]: struct.Connection.html#method.poll /// [`poll`]: struct.Connection.html#method.poll
/// [`RecvStream`]: ../struct.RecvStream.html /// [`RecvStream`]: ../struct.RecvStream.html
/// [`SendStream`]: ../struct.SendStream.html /// [`SendStream`]: ../struct.SendStream.html
pub fn poll_close(&mut self) -> Poll<(), crate::Error> { pub fn poll_close(&mut self, cx: &mut Context) -> Poll<Result<(), crate::Error>> {
self.connection.poll().map_err(Into::into) self.connection.poll(cx).map_err(Into::into)
} }
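Because `poll_close` now takes a `Context`, callers can no longer loop on it directly; a small sketch of driving it from async code with `poll_fn` (the function name is illustrative, and the bounds simply mirror the impl block above):

use futures::future::poll_fn;
use h2::server::Connection;

// Sketch: drive a server connection until it has closed cleanly.
async fn drive_to_close<T, B>(mut connection: Connection<T, B>) -> Result<(), h2::Error>
where
    T: tokio_io::AsyncRead + tokio_io::AsyncWrite + Unpin,
    B: bytes::IntoBuf + Unpin,
    B::Buf: Unpin,
{
    // `poll_close` now needs a `Context`, so wrap it in `poll_fn` and await it.
    poll_fn(|cx| connection.poll_close(cx)).await
}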
#[deprecated(note="use abrupt_shutdown or graceful_shutdown instead", since="0.1.4")] #[deprecated(
note = "use abrupt_shutdown or graceful_shutdown instead",
since = "0.1.4"
)]
#[doc(hidden)] #[doc(hidden)]
pub fn close_connection(&mut self) { pub fn close_connection(&mut self) {
self.graceful_shutdown(); self.graceful_shutdown();
@@ -453,31 +450,28 @@ where
/// ///
/// This may only be called once. Calling multiple times will return `None`. /// This may only be called once. Calling multiple times will return `None`.
pub fn ping_pong(&mut self) -> Option<PingPong> { pub fn ping_pong(&mut self) -> Option<PingPong> {
self.connection self.connection.take_user_pings().map(PingPong::new)
.take_user_pings()
.map(PingPong::new)
} }
} }
impl<T, B> futures::Stream for Connection<T, B> impl<T, B> futures::Stream for Connection<T, B>
where where
T: AsyncRead + AsyncWrite, T: AsyncRead + AsyncWrite + Unpin,
B: IntoBuf, B: IntoBuf + Unpin,
B::Buf: 'static, B::Buf: Unpin + 'static,
{ {
type Item = (Request<RecvStream>, SendResponse<B>); type Item = Result<(Request<RecvStream>, SendResponse<B>), crate::Error>;
type Error = crate::Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, crate::Error> { fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
// Always try to advance the internal state. Getting NotReady is also // Always try to advance the internal state. Getting Pending is also
// needed to allow this function to return NotReady. // needed to allow this function to return Pending.
match self.poll_close()? { match self.poll_close(cx)? {
Async::Ready(_) => { Poll::Ready(_) => {
// If the socket is closed, don't return anything // If the socket is closed, don't return anything
// TODO: drop any pending streams // TODO: drop any pending streams
return Ok(None.into()); return Poll::Ready(None);
}, }
_ => {}, _ => {}
} }
if let Some(inner) = self.connection.next_incoming() { if let Some(inner) = self.connection.next_incoming() {
@@ -488,10 +482,10 @@ where
let request = Request::from_parts(head, body); let request = Request::from_parts(head, body);
let respond = SendResponse { inner }; let respond = SendResponse { inner };
return Ok(Some((request, respond)).into()); return Poll::Ready(Some(Ok((request, respond))));
} }
Ok(Async::NotReady) Poll::Pending
} }
} }
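The ordering inside `poll_next` is deliberate: the connection is polled first so a waker is registered before the method can ever return `Pending`. A stripped-down illustration of that shape with placeholder types (not the crate's actual internals):

use std::collections::VecDeque;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

// Illustration only: `driver` stands in for the connection task,
// `accepted` for the queue surfaced by `next_incoming()`.
struct AcceptSketch<D> {
    driver: D,
    accepted: VecDeque<u32>,
}

impl<D: Future<Output = ()> + Unpin> futures::Stream for AcceptSketch<D> {
    type Item = u32;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<u32>> {
        // Drive the internal state first; this is also what registers the waker.
        if Pin::new(&mut self.driver).poll(cx).is_ready() {
            // The underlying connection finished: end the stream.
            return Poll::Ready(None);
        }
        match self.accepted.pop_front() {
            Some(item) => Poll::Ready(Some(item)),
            // Safe to return Pending: the poll above scheduled a wakeup.
            None => Poll::Pending,
        }
    }
}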
@@ -522,7 +516,7 @@ impl Builder {
/// # use tokio_io::*; /// # use tokio_io::*;
/// # use h2::server::*; /// # use h2::server::*;
/// # /// #
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Handshake<T> /// # -> Handshake<T>
/// # { /// # {
/// // `server_fut` is a future representing the completion of the HTTP/2.0 /// // `server_fut` is a future representing the completion of the HTTP/2.0
@@ -561,7 +555,7 @@ impl Builder {
/// # use tokio_io::*; /// # use tokio_io::*;
/// # use h2::server::*; /// # use h2::server::*;
/// # /// #
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Handshake<T> /// # -> Handshake<T>
/// # { /// # {
/// // `server_fut` is a future representing the completion of the HTTP/2.0 /// // `server_fut` is a future representing the completion of the HTTP/2.0
@@ -595,7 +589,7 @@ impl Builder {
/// # use tokio_io::*; /// # use tokio_io::*;
/// # use h2::server::*; /// # use h2::server::*;
/// # /// #
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Handshake<T> /// # -> Handshake<T>
/// # { /// # {
/// // `server_fut` is a future representing the completion of the HTTP/2.0 /// // `server_fut` is a future representing the completion of the HTTP/2.0
@@ -628,7 +622,7 @@ impl Builder {
/// # use tokio_io::*; /// # use tokio_io::*;
/// # use h2::server::*; /// # use h2::server::*;
/// # /// #
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Handshake<T> /// # -> Handshake<T>
/// # { /// # {
/// // `server_fut` is a future representing the completion of the HTTP/2.0 /// // `server_fut` is a future representing the completion of the HTTP/2.0
@@ -667,7 +661,7 @@ impl Builder {
/// # use tokio_io::*; /// # use tokio_io::*;
/// # use h2::server::*; /// # use h2::server::*;
/// # /// #
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Handshake<T> /// # -> Handshake<T>
/// # { /// # {
/// // `server_fut` is a future representing the completion of the HTTP/2.0 /// // `server_fut` is a future representing the completion of the HTTP/2.0
@@ -715,7 +709,7 @@ impl Builder {
/// # use tokio_io::*; /// # use tokio_io::*;
/// # use h2::server::*; /// # use h2::server::*;
/// # /// #
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Handshake<T> /// # -> Handshake<T>
/// # { /// # {
/// // `server_fut` is a future representing the completion of the HTTP/2.0 /// // `server_fut` is a future representing the completion of the HTTP/2.0
@@ -761,7 +755,7 @@ impl Builder {
/// # use tokio_io::*; /// # use tokio_io::*;
/// # use h2::server::*; /// # use h2::server::*;
/// # /// #
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Handshake<T> /// # -> Handshake<T>
/// # { /// # {
/// // `server_fut` is a future representing the completion of the HTTP/2.0 /// // `server_fut` is a future representing the completion of the HTTP/2.0
@@ -808,7 +802,7 @@ impl Builder {
/// # use h2::server::*; /// # use h2::server::*;
/// # use std::time::Duration; /// # use std::time::Duration;
/// # /// #
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Handshake<T> /// # -> Handshake<T>
/// # { /// # {
/// // `server_fut` is a future representing the completion of the HTTP/2.0 /// // `server_fut` is a future representing the completion of the HTTP/2.0
@@ -850,7 +844,7 @@ impl Builder {
/// # use tokio_io::*; /// # use tokio_io::*;
/// # use h2::server::*; /// # use h2::server::*;
/// # /// #
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Handshake<T> /// # -> Handshake<T>
/// # { /// # {
/// // `server_fut` is a future representing the completion of the HTTP/2.0 /// // `server_fut` is a future representing the completion of the HTTP/2.0
@@ -870,7 +864,7 @@ impl Builder {
/// # use tokio_io::*; /// # use tokio_io::*;
/// # use h2::server::*; /// # use h2::server::*;
/// # /// #
/// # fn doc<T: AsyncRead + AsyncWrite>(my_io: T) /// # fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Handshake<T, &'static [u8]> /// # -> Handshake<T, &'static [u8]>
/// # { /// # {
/// // `server_fut` is a future representing the completion of the HTTP/2.0 /// // `server_fut` is a future representing the completion of the HTTP/2.0
@@ -884,9 +878,9 @@ impl Builder {
/// ``` /// ```
pub fn handshake<T, B>(&self, io: T) -> Handshake<T, B> pub fn handshake<T, B>(&self, io: T) -> Handshake<T, B>
where where
T: AsyncRead + AsyncWrite, T: AsyncRead + AsyncWrite + Unpin,
B: IntoBuf, B: IntoBuf + Unpin,
B::Buf: 'static, B::Buf: Unpin + 'static,
{ {
Connection::handshake2(io, self.clone()) Connection::handshake2(io, self.clone())
} }
@@ -949,7 +943,7 @@ impl<B: IntoBuf> SendResponse<B> {
/// Polls to be notified when the client resets this stream. /// Polls to be notified when the client resets this stream.
/// ///
/// If the stream is still open, this returns `Ok(Async::NotReady)`, and /// If the stream is still open, this returns `Poll::Pending`, and
/// registers the task to be notified if a `RST_STREAM` is received. /// registers the task to be notified if a `RST_STREAM` is received.
/// ///
/// If a `RST_STREAM` frame is received for this stream, calling this /// If a `RST_STREAM` frame is received for this stream, calling this
@@ -959,8 +953,8 @@ impl<B: IntoBuf> SendResponse<B> {
/// ///
/// Calling this method after having called `send_response` will return /// Calling this method after having called `send_response` will return
/// a user error. /// a user error.
pub fn poll_reset(&mut self) -> Poll<Reason, crate::Error> { pub fn poll_reset(&mut self, cx: &mut Context) -> Poll<Result<Reason, crate::Error>> {
self.inner.poll_reset(proto::PollReset::AwaitingHeaders) self.inner.poll_reset(cx, proto::PollReset::AwaitingHeaders)
} }
/// Returns the stream ID of the response stream. /// Returns the stream ID of the response stream.
@@ -979,26 +973,23 @@ impl<B: IntoBuf> SendResponse<B> {
impl<T, B: Buf> Flush<T, B> { impl<T, B: Buf> Flush<T, B> {
fn new(codec: Codec<T, B>) -> Self { fn new(codec: Codec<T, B>) -> Self {
Flush { Flush { codec: Some(codec) }
codec: Some(codec),
}
} }
} }
impl<T, B> Future for Flush<T, B> impl<T, B> Future for Flush<T, B>
where where
T: AsyncWrite, T: AsyncWrite + Unpin,
B: Buf, B: Buf + Unpin,
{ {
type Item = Codec<T, B>; type Output = Result<Codec<T, B>, crate::Error>;
type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> { fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// Flush the codec // Flush the codec
try_ready!(self.codec.as_mut().unwrap().flush()); ready!(self.codec.as_mut().unwrap().flush(cx))?;
// Return the codec // Return the codec
Ok(Async::Ready(self.codec.take().unwrap())) Poll::Ready(Ok(self.codec.take().unwrap()))
} }
} }
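`Flush` shows the per-future mechanics of the migration in one place: `type Item`/`type Error` collapse into `type Output = Result<_, _>`, and `poll` takes `Pin<&mut Self>` plus a `Context`. A toy sketch of the same transformation (names are illustrative only):

use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

// Toy future holding an Option, mirroring how `Flush` holds `Option<Codec>`.
struct TakeOnce<T>(Option<T>);

// futures 0.1 shape:  type Item = T; type Error = io::Error;
//                     fn poll(&mut self) -> Poll<T, io::Error>
impl<T: Unpin> Future for TakeOnce<T> {
    type Output = Result<T, std::io::Error>;

    fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> {
        // `take().expect(..)` mirrors `self.codec.take().unwrap()` in `Flush::poll`.
        Poll::Ready(Ok(self.0.take().expect("polled after completion")))
    }
}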
@@ -1017,49 +1008,50 @@ impl<T, B: Buf> ReadPreface<T, B> {
impl<T, B> Future for ReadPreface<T, B> impl<T, B> Future for ReadPreface<T, B>
where where
T: AsyncRead, T: AsyncRead + Unpin,
B: Buf, B: Buf + Unpin,
{ {
type Item = Codec<T, B>; type Output = Result<Codec<T, B>, crate::Error>;
type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> { fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut buf = [0; 24]; let mut buf = [0; 24];
let mut rem = PREFACE.len() - self.pos; let mut rem = PREFACE.len() - self.pos;
while rem > 0 { while rem > 0 {
let n = try_nb!(self.inner_mut().read(&mut buf[..rem])); let n = ready!(Pin::new(self.inner_mut()).poll_read(cx, &mut buf[..rem]))?;
if n == 0 { if n == 0 {
return Err(io::Error::new( return Poll::Ready(Err(io::Error::new(
io::ErrorKind::ConnectionReset, io::ErrorKind::ConnectionReset,
"connection closed unexpectedly", "connection closed unexpectedly",
).into()); )
.into()));
} }
if PREFACE[self.pos..self.pos + n] != buf[..n] { if PREFACE[self.pos..self.pos + n] != buf[..n] {
proto_err!(conn: "read_preface: invalid preface"); proto_err!(conn: "read_preface: invalid preface");
// TODO: Should this just write the GO_AWAY frame directly? // TODO: Should this just write the GO_AWAY frame directly?
return Err(Reason::PROTOCOL_ERROR.into()); return Poll::Ready(Err(Reason::PROTOCOL_ERROR.into()));
} }
self.pos += n; self.pos += n;
rem -= n; // TODO test rem -= n; // TODO test
} }
Ok(Async::Ready(self.codec.take().unwrap())) Poll::Ready(Ok(self.codec.take().unwrap()))
} }
} }
// ===== impl Handshake ===== // ===== impl Handshake =====
impl<T, B: IntoBuf> Future for Handshake<T, B> impl<T, B: IntoBuf> Future for Handshake<T, B>
where T: AsyncRead + AsyncWrite, where
B: IntoBuf, T: AsyncRead + AsyncWrite + Unpin,
B: IntoBuf + Unpin,
B::Buf: Unpin,
{ {
type Item = Connection<T, B>; type Output = Result<Connection<T, B>, crate::Error>;
type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> { fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
log::trace!("Handshake::poll(); state={:?};", self.state); log::trace!("Handshake::poll(); state={:?};", self.state);
use crate::server::Handshaking::*; use crate::server::Handshaking::*;
@@ -1067,12 +1059,12 @@ impl<T, B: IntoBuf> Future for Handshake<T, B>
// We're currently flushing a pending SETTINGS frame. Poll the // We're currently flushing a pending SETTINGS frame. Poll the
// flush future, and, if it's completed, advance our state to wait // flush future, and, if it's completed, advance our state to wait
// for the client preface. // for the client preface.
let codec = match flush.poll()? { let codec = match Pin::new(flush).poll(cx)? {
Async::NotReady => { Poll::Pending => {
log::trace!("Handshake::poll(); flush.poll()=NotReady"); log::trace!("Handshake::poll(); flush.poll()=Pending");
return Ok(Async::NotReady); return Poll::Pending;
}, }
Async::Ready(flushed) => { Poll::Ready(flushed) => {
log::trace!("Handshake::poll(); flush.poll()=Ready"); log::trace!("Handshake::poll(); flush.poll()=Ready");
flushed flushed
} }
@@ -1089,38 +1081,41 @@ impl<T, B: IntoBuf> Future for Handshake<T, B>
// We're now waiting for the client preface. Poll the `ReadPreface` // We're now waiting for the client preface. Poll the `ReadPreface`
// future. If it has completed, we will create a `Connection` handle // future. If it has completed, we will create a `Connection` handle
// for the connection. // for the connection.
read.poll() Pin::new(read).poll(cx)
// Actually creating the `Connection` has to occur outside of this // Actually creating the `Connection` has to occur outside of this
// `if let` block, because we've borrowed `self` mutably in order // `if let` block, because we've borrowed `self` mutably in order
// to poll the state and won't be able to borrow the SETTINGS frame // to poll the state and won't be able to borrow the SETTINGS frame
// as well until we release the borrow for `poll()`. // as well until we release the borrow for `poll()`.
} else { } else {
unreachable!("Handshake::poll() state was not advanced completely!") unreachable!("Handshake::poll() state was not advanced completely!")
}; };
let server = poll?.map(|codec| { poll?.map(|codec| {
let connection = proto::Connection::new(codec, Config { let connection = proto::Connection::new(
next_stream_id: 2.into(), codec,
// Server does not need to locally initiate any streams Config {
initial_max_send_streams: 0, next_stream_id: 2.into(),
reset_stream_duration: self.builder.reset_stream_duration, // Server does not need to locally initiate any streams
reset_stream_max: self.builder.reset_stream_max, initial_max_send_streams: 0,
settings: self.builder.settings.clone(), reset_stream_duration: self.builder.reset_stream_duration,
}); reset_stream_max: self.builder.reset_stream_max,
settings: self.builder.settings.clone(),
},
);
log::trace!("Handshake::poll(); connection established!"); log::trace!("Handshake::poll(); connection established!");
let mut c = Connection { connection }; let mut c = Connection { connection };
if let Some(sz) = self.builder.initial_target_connection_window_size { if let Some(sz) = self.builder.initial_target_connection_window_size {
c.set_target_window_size(sz); c.set_target_window_size(sz);
} }
c Ok(c)
}); })
Ok(server)
} }
} }
impl<T, B> fmt::Debug for Handshake<T, B> impl<T, B> fmt::Debug for Handshake<T, B>
where T: AsyncRead + AsyncWrite + fmt::Debug, where
B: fmt::Debug + IntoBuf, T: AsyncRead + AsyncWrite + fmt::Debug,
B: fmt::Debug + IntoBuf,
{ {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "server::Handshake") write!(fmt, "server::Handshake")
@@ -1131,16 +1126,14 @@ impl Peer {
pub fn convert_send_message( pub fn convert_send_message(
id: StreamId, id: StreamId,
response: Response<()>, response: Response<()>,
end_of_stream: bool) -> frame::Headers end_of_stream: bool,
{ ) -> frame::Headers {
use http::response::Parts; use http::response::Parts;
// Extract the components of the HTTP request // Extract the components of the HTTP request
let ( let (
Parts { Parts {
status, status, headers, ..
headers,
..
}, },
_, _,
) = response.into_parts(); ) = response.into_parts();
@@ -1172,7 +1165,9 @@ impl proto::Peer for Peer {
} }
fn convert_poll_message( fn convert_poll_message(
pseudo: Pseudo, fields: HeaderMap, stream_id: StreamId pseudo: Pseudo,
fields: HeaderMap,
stream_id: StreamId,
) -> Result<Self::Poll, RecvError> { ) -> Result<Self::Poll, RecvError> {
use http::{uri, Version}; use http::{uri, Version};
@@ -1205,23 +1200,29 @@ impl proto::Peer for Peer {
// Convert the URI // Convert the URI
let mut parts = uri::Parts::default(); let mut parts = uri::Parts::default();
// A request translated from HTTP/1 must not include the :authority // A request translated from HTTP/1 must not include the :authority
// header // header
if let Some(authority) = pseudo.authority { if let Some(authority) = pseudo.authority {
let maybe_authority = uri::Authority::from_shared(authority.clone().into_inner()); let maybe_authority = uri::Authority::from_shared(authority.clone().into_inner());
parts.authority = Some(maybe_authority.or_else(|why| malformed!( parts.authority = Some(maybe_authority.or_else(|why| {
"malformed headers: malformed authority ({:?}): {}", authority, why, malformed!(
))?); "malformed headers: malformed authority ({:?}): {}",
authority,
why,
)
})?);
} }
// A :scheme is always required. // A :scheme is always required.
if let Some(scheme) = pseudo.scheme { if let Some(scheme) = pseudo.scheme {
let maybe_scheme = uri::Scheme::from_shared(scheme.clone().into_inner()); let maybe_scheme = uri::Scheme::from_shared(scheme.clone().into_inner());
let scheme = maybe_scheme.or_else(|why| malformed!( let scheme = maybe_scheme.or_else(|why| {
"malformed headers: malformed scheme ({:?}): {}", scheme, why, malformed!(
))?; "malformed headers: malformed scheme ({:?}): {}",
scheme,
why,
)
})?;
// It's not possible to build a `Uri` from a scheme and path. So, // It's not possible to build a `Uri` from a scheme and path. So,
// after validating it was a valid scheme, we just have to drop it // after validating it was a valid scheme, we just have to drop it
@@ -1240,9 +1241,9 @@ impl proto::Peer for Peer {
} }
let maybe_path = uri::PathAndQuery::from_shared(path.clone().into_inner()); let maybe_path = uri::PathAndQuery::from_shared(path.clone().into_inner());
parts.path_and_query = Some(maybe_path.or_else(|why| malformed!( parts.path_and_query = Some(maybe_path.or_else(|why| {
"malformed headers: malformed path ({:?}): {}", path, why, malformed!("malformed headers: malformed path ({:?}): {}", path, why,)
))?); })?);
} }
b.uri(parts); b.uri(parts);
@@ -1257,7 +1258,7 @@ impl proto::Peer for Peer {
id: stream_id, id: stream_id,
reason: Reason::PROTOCOL_ERROR, reason: Reason::PROTOCOL_ERROR,
}); });
}, }
}; };
*request.headers_mut() = fields; *request.headers_mut() = fields;
@@ -1270,18 +1271,15 @@ impl proto::Peer for Peer {
impl<T, B> fmt::Debug for Handshaking<T, B> impl<T, B> fmt::Debug for Handshaking<T, B>
where where
B: IntoBuf B: IntoBuf,
{ {
#[inline] fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { #[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self { match *self {
Handshaking::Flushing(_) => Handshaking::Flushing(_) => write!(f, "Handshaking::Flushing(_)"),
write!(f, "Handshaking::Flushing(_)"), Handshaking::ReadingPreface(_) => write!(f, "Handshaking::ReadingPreface(_)"),
Handshaking::ReadingPreface(_) => Handshaking::Empty => write!(f, "Handshaking::Empty"),
write!(f, "Handshaking::ReadingPreface(_)"),
Handshaking::Empty =>
write!(f, "Handshaking::Empty"),
} }
} }
} }
@@ -1290,18 +1288,19 @@ where
T: AsyncRead + AsyncWrite, T: AsyncRead + AsyncWrite,
B: IntoBuf, B: IntoBuf,
{ {
#[inline] fn from(flush: Flush<T, Prioritized<B::Buf>>) -> Self { #[inline]
fn from(flush: Flush<T, Prioritized<B::Buf>>) -> Self {
Handshaking::Flushing(flush) Handshaking::Flushing(flush)
} }
} }
impl<T, B> convert::From<ReadPreface<T, Prioritized<B::Buf>>> for impl<T, B> convert::From<ReadPreface<T, Prioritized<B::Buf>>> for Handshaking<T, B>
Handshaking<T, B>
where where
T: AsyncRead + AsyncWrite, T: AsyncRead + AsyncWrite,
B: IntoBuf, B: IntoBuf,
{ {
#[inline] fn from(read: ReadPreface<T, Prioritized<B::Buf>>) -> Self { #[inline]
fn from(read: ReadPreface<T, Prioritized<B::Buf>>) -> Self {
Handshaking::ReadingPreface(read) Handshaking::ReadingPreface(read)
} }
} }
@@ -1311,7 +1310,8 @@ where
T: AsyncRead + AsyncWrite, T: AsyncRead + AsyncWrite,
B: IntoBuf, B: IntoBuf,
{ {
#[inline] fn from(codec: Codec<T, Prioritized<B::Buf>>) -> Self { #[inline]
fn from(codec: Codec<T, Prioritized<B::Buf>>) -> Self {
Handshaking::from(Flush::new(codec)) Handshaking::from(Flush::new(codec))
} }
} }

View File

@@ -3,10 +3,13 @@ use crate::frame::Reason;
use crate::proto::{self, WindowSize}; use crate::proto::{self, WindowSize};
use bytes::{Bytes, IntoBuf}; use bytes::{Bytes, IntoBuf};
use futures::{self, Poll, Async, try_ready}; use http::HeaderMap;
use http::{HeaderMap};
use crate::PollExt;
use futures::ready;
use std::fmt; use std::fmt;
use std::pin::Pin;
use std::task::{Context, Poll};
/// Sends the body stream and trailers to the remote peer. /// Sends the body stream and trailers to the remote peer.
/// ///
@@ -264,11 +267,12 @@ impl<B: IntoBuf> SendStream<B> {
/// is sent. For example: /// is sent. For example:
/// ///
/// ```rust /// ```rust
/// #![feature(async_await)]
/// # use h2::*; /// # use h2::*;
/// # fn doc(mut send_stream: SendStream<&'static [u8]>) { /// # async fn doc(mut send_stream: SendStream<&'static [u8]>) {
/// send_stream.reserve_capacity(100); /// send_stream.reserve_capacity(100);
/// ///
/// let capacity = send_stream.poll_capacity(); /// let capacity = futures::future::poll_fn(|cx| send_stream.poll_capacity(cx)).await;
/// // capacity == 5; /// // capacity == 5;
/// ///
/// send_stream.send_data(b"hello", false).unwrap(); /// send_stream.send_data(b"hello", false).unwrap();
@@ -309,9 +313,11 @@ impl<B: IntoBuf> SendStream<B> {
/// amount of assigned capacity at that point in time. It is also possible /// amount of assigned capacity at that point in time. It is also possible
/// that `n` is lower than the previous call if, since then, the caller has /// that `n` is lower than the previous call if, since then, the caller has
/// sent data. /// sent data.
pub fn poll_capacity(&mut self) -> Poll<Option<usize>, crate::Error> { pub fn poll_capacity(&mut self, cx: &mut Context) -> Poll<Option<Result<usize, crate::Error>>> {
let res = try_ready!(self.inner.poll_capacity()); self.inner
Ok(Async::Ready(res.map(|v| v as usize))) .poll_capacity(cx)
.map_ok_(|w| w as usize)
.map_err_(Into::into)
} }
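`map_ok_` and `map_err_` come from the crate-internal `PollExt` imported near the top of this file; its definition is not part of this diff. A plausible sketch of such an adapter, stated as an assumption rather than the crate's actual code:

use std::task::Poll;

// Assumed shape of a `PollExt`-style adapter for the nested
// `Poll<Option<Result<T, E>>>` values produced by the stream internals.
trait PollExtSketch<T, E> {
    fn map_ok_<U, F: FnOnce(T) -> U>(self, f: F) -> Poll<Option<Result<U, E>>>;
    fn map_err_<U, F: FnOnce(E) -> U>(self, f: F) -> Poll<Option<Result<T, U>>>;
}

impl<T, E> PollExtSketch<T, E> for Poll<Option<Result<T, E>>> {
    fn map_ok_<U, F: FnOnce(T) -> U>(self, f: F) -> Poll<Option<Result<U, E>>> {
        self.map(|opt| opt.map(|res| res.map(f)))
    }
    fn map_err_<U, F: FnOnce(E) -> U>(self, f: F) -> Poll<Option<Result<T, U>>> {
        self.map(|opt| opt.map(|res| res.map_err(f)))
    }
}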
/// Sends a single data frame to the remote peer. /// Sends a single data frame to the remote peer.
@@ -356,7 +362,7 @@ impl<B: IntoBuf> SendStream<B> {
/// Polls to be notified when the client resets this stream. /// Polls to be notified when the client resets this stream.
/// ///
/// If the stream is still open, this returns `Ok(Async::NotReady)`, and /// If the stream is still open, this returns `Poll::Pending`, and
/// registers the task to be notified if a `RST_STREAM` is received. /// registers the task to be notified if a `RST_STREAM` is received.
/// ///
/// If a `RST_STREAM` frame is received for this stream, calling this /// If a `RST_STREAM` frame is received for this stream, calling this
@@ -366,8 +372,8 @@ impl<B: IntoBuf> SendStream<B> {
/// ///
/// If connection sees an error, this returns that error instead of a /// If connection sees an error, this returns that error instead of a
/// `Reason`. /// `Reason`.
pub fn poll_reset(&mut self) -> Poll<Reason, crate::Error> { pub fn poll_reset(&mut self, cx: &mut Context) -> Poll<Result<Reason, crate::Error>> {
self.inner.poll_reset(proto::PollReset::Streaming) self.inner.poll_reset(cx, proto::PollReset::Streaming)
} }
/// Returns the stream ID of this `SendStream`. /// Returns the stream ID of this `SendStream`.
@@ -417,8 +423,11 @@ impl RecvStream {
} }
/// Returns received trailers. /// Returns received trailers.
pub fn poll_trailers(&mut self) -> Poll<Option<HeaderMap>, crate::Error> { pub fn poll_trailers(
self.inner.inner.poll_trailers().map_err(Into::into) &mut self,
cx: &mut Context,
) -> Poll<Option<Result<HeaderMap, crate::Error>>> {
self.inner.inner.poll_trailers(cx).map_err_(Into::into)
} }
/// Returns the stream ID of this stream. /// Returns the stream ID of this stream.
@@ -432,11 +441,10 @@ impl RecvStream {
} }
impl futures::Stream for RecvStream { impl futures::Stream for RecvStream {
type Item = Bytes; type Item = Result<Bytes, crate::Error>;
type Error = crate::Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.inner.inner.poll_data().map_err(Into::into) self.inner.inner.poll_data(cx).map_err_(Into::into)
} }
} }
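With `Item` now `Result<Bytes, crate::Error>`, body consumption moves from 0.1 combinators to a plain `while let` loop; a sketch of the caller side under the new API (flow-control release is elided and should be done via `ReleaseCapacity` in real code):

use futures::StreamExt;
use h2::RecvStream;

// Sketch: read an entire body, counting bytes.
async fn read_body(mut body: RecvStream) -> Result<usize, h2::Error> {
    let mut total = 0;
    while let Some(chunk) = body.next().await {
        // Each item is now a Result, so per-chunk errors surface here.
        total += chunk?.len();
    }
    Ok(total)
}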
@@ -514,9 +522,7 @@ impl Clone for ReleaseCapacity {
impl PingPong { impl PingPong {
pub(crate) fn new(inner: proto::UserPings) -> Self { pub(crate) fn new(inner: proto::UserPings) -> Self {
PingPong { PingPong { inner }
inner,
}
} }
/// Send a `PING` frame to the peer. /// Send a `PING` frame to the peer.
@@ -540,12 +546,10 @@ impl PingPong {
// just drop it. // just drop it.
drop(ping); drop(ping);
self.inner self.inner.send_ping().map_err(|err| match err {
.send_ping() Some(err) => err.into(),
.map_err(|err| match err { None => UserError::SendPingWhilePending.into(),
Some(err) => err.into(), })
None => UserError::SendPingWhilePending.into()
})
} }
/// Polls for the acknowledgement of a previously [sent][] `PING` frame. /// Polls for the acknowledgement of a previously [sent][] `PING` frame.
@@ -553,8 +557,8 @@ impl PingPong {
/// # Example /// # Example
/// ///
/// ``` /// ```
/// # use futures::Future; /// #![feature(async_await)]
/// # fn doc(mut ping_pong: h2::PingPong) { /// # async fn doc(mut ping_pong: h2::PingPong) {
/// // let mut ping_pong = ... /// // let mut ping_pong = ...
/// ///
/// // First, send a PING. /// // First, send a PING.
@@ -563,26 +567,23 @@ impl PingPong {
/// .unwrap(); /// .unwrap();
/// ///
/// // And then wait for the PONG. /// // And then wait for the PONG.
/// futures::future::poll_fn(move || { /// futures::future::poll_fn(move |cx| {
/// ping_pong.poll_pong() /// ping_pong.poll_pong(cx)
/// }).wait().unwrap(); /// }).await.unwrap();
/// # } /// # }
/// # fn main() {} /// # fn main() {}
/// ``` /// ```
/// ///
/// [sent]: struct.PingPong.html#method.send_ping /// [sent]: struct.PingPong.html#method.send_ping
pub fn poll_pong(&mut self) -> Poll<Pong, crate::Error> { pub fn poll_pong(&mut self, cx: &mut Context) -> Poll<Result<Pong, crate::Error>> {
try_ready!(self.inner.poll_pong()); ready!(self.inner.poll_pong(cx))?;
Ok(Async::Ready(Pong { Poll::Ready(Ok(Pong { _p: () }))
_p: (),
}))
} }
} }
impl fmt::Debug for PingPong { impl fmt::Debug for PingPong {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("PingPong") fmt.debug_struct("PingPong").finish()
.finish()
} }
} }
@@ -595,16 +596,13 @@ impl Ping {
/// ///
/// [`PingPong`]: struct.PingPong.html /// [`PingPong`]: struct.PingPong.html
pub fn opaque() -> Ping { pub fn opaque() -> Ping {
Ping { Ping { _p: () }
_p: (),
}
} }
} }
impl fmt::Debug for Ping { impl fmt::Debug for Ping {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Ping") fmt.debug_struct("Ping").finish()
.finish()
} }
} }
@@ -612,7 +610,6 @@ impl fmt::Debug for Ping {
impl fmt::Debug for Pong { impl fmt::Debug for Pong {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Pong") fmt.debug_struct("Pong").finish()
.finish()
} }
} }