refactor(http2): re-enable http2 client and server support

Sean McArthur
2019-08-19 14:03:05 -07:00
parent 4920f5e264
commit 41f4173615
11 changed files with 323 additions and 369 deletions


@@ -29,7 +29,7 @@ futures-util-preview = { version = "0.3.0-alpha.17" }
 http = "0.1.15"
 http-body = "0.1"
 httparse = "1.0"
-h2 = "0.1.10"
+h2 = { git = "https://github.com/hyperium/h2" }
 iovec = "0.1"
 itoa = "0.4.1"
 log = "0.4"


@@ -64,18 +64,14 @@ fn http1_parallel_x10_req_10mb(b: &mut test::Bencher) {
 }

 #[bench]
-#[ignore]
 fn http2_get(b: &mut test::Bencher) {
-    // FIXME: re-implement tests when `h2` upgrades to `async/await`
     opts()
         .http2()
         .bench(b)
 }

 #[bench]
-#[ignore]
 fn http2_post(b: &mut test::Bencher) {
-    // FIXME: re-implement tests when `h2` upgrades to `async/await`
     opts()
         .http2()
         .method(Method::POST)
@@ -84,9 +80,7 @@ fn http2_post(b: &mut test::Bencher) {
 }

 #[bench]
-#[ignore]
 fn http2_req_100kb(b: &mut test::Bencher) {
-    // FIXME: re-implement tests when `h2` upgrades to `async/await`
     let body = &[b'x'; 1024 * 100];
     opts()
         .http2()
@@ -96,9 +90,7 @@ fn http2_req_100kb(b: &mut test::Bencher) {
 }

 #[bench]
-#[ignore]
 fn http2_parallel_x10_empty(b: &mut test::Bencher) {
-    // FIXME: re-implement tests when `h2` upgrades to `async/await`
     opts()
         .http2()
         .parallel(10)
@@ -106,9 +98,7 @@ fn http2_parallel_x10_empty(b: &mut test::Bencher) {
 }

 #[bench]
-#[ignore]
 fn http2_parallel_x10_req_10mb(b: &mut test::Bencher) {
-    // FIXME: re-implement tests when `h2` upgrades to `async/await`
     let body = &[b'x'; 1024 * 1024 * 10];
     opts()
         .http2()
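These benchmarks are built on the unstable `test::Bencher` harness; nothing hyper-specific is needed to see how that harness works. A minimal, self-contained sketch (nightly-only, unrelated to hyper's own `opts()` helper):

```rust
#![feature(test)]
extern crate test;

#[bench]
fn noop(b: &mut test::Bencher) {
    // The closure passed to `iter` is what gets timed, once per iteration.
    b.iter(|| test::black_box(1 + 1));
}
```

With the `#[ignore]` attributes gone, the http2 benchmarks above run again under `cargo +nightly bench`.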


@@ -269,25 +269,17 @@ impl Body {
                     }
                     None => Poll::Ready(None),
                 }
-            }
+            },
             Kind::H2 {
-                /*recv: ref mut h2,*/ ..
-            } => {
-                unimplemented!("h2.poll_inner");
-                /*
-                h2
-                    .poll()
-                    .map(|r#async| {
-                        r#async.map(|opt| {
-                            opt.map(|bytes| {
-                                let _ = h2.release_capacity().release_capacity(bytes.len());
-                                Chunk::from(bytes)
-                            })
-                        })
-                    })
-                    .map_err(crate::Error::new_body)
-                */
-            }
+                recv: ref mut h2, ..
+            } => match ready!(Pin::new(&mut *h2).poll_next(cx)) {
+                Some(Ok(bytes)) => {
+                    let _ = h2.release_capacity().release_capacity(bytes.len());
+                    Poll::Ready(Some(Ok(Chunk::from(bytes))))
+                },
+                Some(Err(e)) => Poll::Ready(Some(Err(crate::Error::new_body(e)))),
+                None => Poll::Ready(None),
+            },
             Kind::Wrapped(ref mut s) => {
                 match ready!(s.as_mut().poll_next(cx)) {
                     Some(res) => Poll::Ready(Some(res.map_err(crate::Error::new_body))),
@@ -314,11 +306,12 @@ impl Payload for Body {
         self.poll_eof(cx)
     }

-    fn poll_trailers(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Option<Result<HeaderMap, Self::Error>>> {
+    fn poll_trailers(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Option<Result<HeaderMap, Self::Error>>> {
         match self.kind {
-            Kind::H2 { /*recv: ref mut h2,*/ .. } => {
-                unimplemented!("h2.poll_trailers");
-                //h2.poll_trailers().map_err(crate::Error::new_h2)
+            Kind::H2 { recv: ref mut h2, .. } => match ready!(h2.poll_trailers(cx)) {
+                Some(Ok(t)) => Poll::Ready(Some(Ok(t))),
+                Some(Err(e)) => Poll::Ready(Some(Err(crate::Error::new_h2(e)))),
+                None => Poll::Ready(None),
             },
             _ => Poll::Ready(None),
         }
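The re-enabled `Kind::H2` arm couples reading a DATA chunk to releasing connection-level flow-control capacity, so the peer may send more data as soon as hyper hands the bytes to the caller. A toy model of that bookkeeping, independent of `h2` (the `Window` type here is purely illustrative, not hyper's):

```rust
/// Toy flow-control window: capacity is consumed when data arrives and
/// must be explicitly released once the consumer has taken the bytes.
struct Window {
    available: usize,
}

impl Window {
    fn recv(&mut self, len: usize) -> Result<(), &'static str> {
        if len > self.available {
            return Err("peer overran the advertised window");
        }
        self.available -= len;
        Ok(())
    }

    /// Mirrors `release_capacity(bytes.len())` in the diff: once the chunk
    /// is handed to the body consumer, the sender may reuse that budget.
    fn release(&mut self, len: usize) {
        self.available += len;
    }
}

fn main() {
    let mut window = Window { available: 65_535 };
    window.recv(16_384).unwrap();  // DATA frame arrives
    window.release(16_384);        // chunk delivered to the caller
    assert_eq!(window.available, 65_535);
}
```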


@@ -32,18 +32,19 @@ type Http1Dispatcher<T, B, R> = proto::dispatch::Dispatcher<
 >;

 type ConnEither<T, B> = Either<
     Http1Dispatcher<T, B, proto::h1::ClientTransaction>,
-    proto::h2::Client<T, B>,
+    proto::h2::ClientTask<B>,
 >;

-/// Returns a `Handshake` future over some IO.
+/// Returns a handshake future over some IO.
 ///
 /// This is a shortcut for `Builder::new().handshake(io)`.
-pub fn handshake<T>(io: T) -> Handshake<T, crate::Body>
+pub async fn handshake<T>(io: T) -> crate::Result<(SendRequest<crate::Body>, Connection<T, crate::Body>)>
 where
     T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
 {
     Builder::new()
         .handshake(io)
+        .await
 }

 /// The sender side of an established connection.
@@ -68,7 +69,7 @@ where

 /// A builder to configure an HTTP connection.
 ///
-/// After setting options, the builder is used to create a `Handshake` future.
+/// After setting options, the builder is used to create a handshake future.
 #[derive(Clone, Debug)]
 pub struct Builder {
     pub(super) exec: Exec,
@@ -80,16 +81,6 @@ pub struct Builder {
     h2_builder: h2::client::Builder,
 }

-/// A future setting up HTTP over an IO object.
-///
-/// If successful, yields a `(SendRequest, Connection)` pair.
-#[must_use = "futures do nothing unless polled"]
-pub struct Handshake<T, B> {
-    builder: Builder,
-    io: Option<T>,
-    _marker: PhantomData<fn(B)>,
-}
-
 /// A future returned by `SendRequest::send_request`.
 ///
 /// Yields a `Response` if successful.
@@ -334,7 +325,8 @@ impl<B> Clone for Http2SendRequest<B> {
 impl<T, B> Connection<T, B>
 where
     T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
-    B: Payload + 'static,
+    B: Payload + Unpin + 'static,
+    B::Data: Unpin,
 {
     /// Return the inner IO object, and additional information.
     ///
@@ -365,29 +357,20 @@ where
     /// Use [`poll_fn`](https://docs.rs/futures/0.1.25/futures/future/fn.poll_fn.html)
     /// and [`try_ready!`](https://docs.rs/futures/0.1.25/futures/macro.try_ready.html)
     /// to work with this function; or use the `without_shutdown` wrapper.
-    pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>>
-    where
-        B: Unpin,
-    {
+    pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
         match self.inner.as_mut().expect("already upgraded") {
             &mut Either::Left(ref mut h1) => {
                 h1.poll_without_shutdown(cx)
             },
             &mut Either::Right(ref mut h2) => {
-                unimplemented!("h2 poll_without_shutdown");
-                /*
-                h2.poll().map(|x| x.map(|_| ()))
-                */
+                Pin::new(h2).poll(cx).map_ok(|_| ())
             }
         }
     }

     /// Prevent shutdown of the underlying IO object at the end of service the request,
     /// instead run `into_parts`. This is a convenience wrapper over `poll_without_shutdown`.
-    pub fn without_shutdown(self) -> impl Future<Output=crate::Result<Parts<T>>>
-    where
-        B: Unpin,
-    {
+    pub fn without_shutdown(self) -> impl Future<Output=crate::Result<Parts<T>>> {
         let mut conn = Some(self);
         future::poll_fn(move |cx| -> Poll<crate::Result<Parts<T>>> {
             ready!(conn.as_mut().unwrap().poll_without_shutdown(cx))?;
@@ -400,6 +383,7 @@ impl<T, B> Future for Connection<T, B>
 where
     T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
     B: Payload + Unpin + 'static,
+    B::Data: Unpin,
 {
     type Output = crate::Result<()>;
@@ -522,70 +506,46 @@ impl Builder {
     }

     /// Constructs a connection with the configured options and IO.
-    #[inline]
-    pub fn handshake<T, B>(&self, io: T) -> Handshake<T, B>
+    pub async fn handshake<T, B>(self, io: T) -> crate::Result<(SendRequest<B>, Connection<T, B>)>
     where
         T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
         B: Payload + 'static,
+        B::Data: Unpin,
     {
         trace!("client handshake HTTP/{}", if self.http2 { 2 } else { 1 });
-        Handshake {
-            builder: self.clone(),
-            io: Some(io),
-            _marker: PhantomData,
-        }
-    }
-}
-
-// ===== impl Handshake
-
-impl<T, B> Future for Handshake<T, B>
-where
-    T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
-    B: Payload + 'static,
-{
-    type Output = crate::Result<(SendRequest<B>, Connection<T, B>)>;
-
-    fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
-        let io = self.io.take().expect("polled more than once");
         let (tx, rx) = dispatch::channel();
-        let either = if !self.builder.http2 {
+        let either = if !self.http2 {
             let mut conn = proto::Conn::new(io);
-            if !self.builder.h1_writev {
+            if !self.h1_writev {
                 conn.set_write_strategy_flatten();
             }
-            if self.builder.h1_title_case_headers {
+            if self.h1_title_case_headers {
                 conn.set_title_case_headers();
             }
-            if let Some(sz) = self.builder.h1_read_buf_exact_size {
+            if let Some(sz) = self.h1_read_buf_exact_size {
                 conn.set_read_buf_exact_size(sz);
             }
-            if let Some(max) = self.builder.h1_max_buf_size {
+            if let Some(max) = self.h1_max_buf_size {
                 conn.set_max_buf_size(max);
             }
             let cd = proto::h1::dispatch::Client::new(rx);
             let dispatch = proto::h1::Dispatcher::new(cd, conn);
             Either::Left(dispatch)
         } else {
-            let h2 = proto::h2::Client::new(io, rx, &self.builder.h2_builder, self.builder.exec.clone());
+            let h2 = proto::h2::client::handshake(io, rx, &self.h2_builder, self.exec.clone())
+                .await?;
             Either::Right(h2)
         };

-        Poll::Ready(Ok((
+        Ok((
             SendRequest {
                 dispatch: tx,
             },
             Connection {
                 inner: Some(either),
             },
-        )))
-    }
-}
-
-impl<T, B> fmt::Debug for Handshake<T, B> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        f.debug_struct("Handshake")
-            .finish()
+        ))
     }
 }
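With `handshake` now an `async fn`, callers simply `.await` it instead of polling a dedicated `Handshake` future. A rough usage sketch against this in-progress async/await branch (the Tokio connect call and error handling are assumptions for illustration; exact alpha APIs may differ):

```rust
// Sketch only: assumes a Tokio runtime and the async/await branch of hyper.
use hyper::{Body, Request};

async fn fetch() -> Result<(), Box<dyn std::error::Error>> {
    // Any `AsyncRead + AsyncWrite + Unpin + Send` IO works; a TCP stream is assumed here.
    let io = tokio::net::TcpStream::connect("127.0.0.1:3000").await?;

    // Shortcut for `Builder::new().handshake(io).await`.
    let (mut send_request, connection) = hyper::client::conn::handshake(io).await?;

    // The connection future must keep being polled for requests to make progress.
    tokio::spawn(async move {
        if let Err(e) = connection.await {
            eprintln!("connection error: {}", e);
        }
    });

    let response = send_request.send_request(Request::new(Body::empty())).await?;
    println!("status: {}", response.status());
    Ok(())
}
```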


@@ -163,7 +163,7 @@ where C: Connect + Sync + 'static,
     C::Transport: 'static,
     C::Future: 'static,
     B: Payload + Unpin + Send + 'static,
-    B::Data: Send,
+    B::Data: Send + Unpin,
 {
     /// Send a `GET` request to the supplied `Uri`.
     ///
@@ -512,8 +512,10 @@ where C: Connect + Sync + 'static,
                     connecting
                 };
                 let is_h2 = is_ver_h2 || connected.alpn == Alpn::H2;
-                Either::Left(conn_builder
+                Either::Left(Box::pin(conn_builder
                     .http2_only(is_h2)
+                    // TODO: convert client::conn::Builder to be by-value?
+                    .clone()
                     .handshake(io)
                     .and_then(move |(tx, conn)| {
                         trace!("handshake complete, spawning background dispatcher task");
@@ -541,7 +543,7 @@ where C: Connect + Sync + 'static,
                             PoolTx::Http1(tx)
                         },
                     })
-                }))
+                })))
             }))
         })
     }
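The new `Box::pin` exists because an `async fn` such as `Builder::handshake` returns an opaque, unnameable future type, which cannot be stored directly where a nameable type is required (here, one arm of an `Either`). A minimal, self-contained illustration of that pattern (all names invented, not hyper's):

```rust
use std::future::Future;
use std::pin::Pin;

async fn handshake_like(x: u32) -> Result<u32, ()> {
    Ok(x + 1)
}

// An enum that must name its future type cannot hold the opaque
// `impl Future` returned by an async fn directly, so box and pin it.
enum ConnFuture {
    H1(Pin<Box<dyn Future<Output = Result<u32, ()>> + Send>>),
}

fn make() -> ConnFuture {
    ConnFuture::H1(Box::pin(handshake_like(41)))
}

fn main() {
    let _fut = make();
    // Driving the future would require an executor; elided here.
}
```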


@@ -1,8 +1,11 @@
 use bytes::IntoBuf;
-//use futures::{Async, Future, Poll, Stream};
+use futures_channel::{mpsc, oneshot};
+use futures_util::future::{self, FutureExt as _, Either};
+use futures_util::stream::StreamExt as _;
+use futures_util::try_future::TryFutureExt as _;
 //use futures::future::{self, Either};
 //use futures::sync::{mpsc, oneshot};
-use h2::client::{Builder, Handshake, SendRequest};
+use h2::client::{Builder, SendRequest};
 use tokio_io::{AsyncRead, AsyncWrite};

 use crate::headers::content_length_parse_all;
@@ -14,192 +17,187 @@ use super::{PipeToSendStream, SendBuf};
 use crate::{Body, Request, Response};

 type ClientRx<B> = crate::client::dispatch::Receiver<Request<B>, Response<Body>>;

 ///// An mpsc channel is used to help notify the `Connection` task when *all*
 ///// other handles to it have been dropped, so that it can shutdown.
-//type ConnDropRef = mpsc::Sender<Never>;
+type ConnDropRef = mpsc::Sender<Never>;

 ///// A oneshot channel watches the `Connection` task, and when it completes,
 ///// the "dispatch" task will be notified and can shutdown sooner.
-//type ConnEof = oneshot::Receiver<Never>;
+type ConnEof = oneshot::Receiver<Never>;

-pub(crate) struct Client<T, B>
-where
-    B: Payload,
-{
-    executor: Exec,
-    rx: ClientRx<B>,
-    state: State<T, SendBuf<B::Data>>,
-}
-
-enum State<T, B> where B: IntoBuf {
-    Handshaking(Handshake<T, B>),
-    //Ready(SendRequest<B>, ConnDropRef, ConnEof),
-}
-
-impl<T, B> Client<T, B>
-where
-    T: AsyncRead + AsyncWrite + Send + 'static,
-    B: Payload,
-{
-    pub(crate) fn new(io: T, rx: ClientRx<B>, builder: &Builder, exec: Exec) -> Client<T, B> {
-        unimplemented!("proto::h2::Client::new");
-        /*
-        let handshake = builder.handshake(io);
-
-        Client {
-            executor: exec,
-            rx: rx,
-            state: State::Handshaking(handshake),
-        }
-        */
-    }
-}
-
-impl<T, B> Future for Client<T, B>
-where
-    T: AsyncRead + AsyncWrite + Send + 'static,
-    B: Payload + 'static,
-{
-    type Output = crate::Result<Dispatched>;
-
-    fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
-        unimplemented!("impl Future for proto::h2::Client");
-        /*
-        loop {
-            let next = match self.state {
-                State::Handshaking(ref mut h) => {
-                    let (request_tx, conn) = try_ready!(h.poll().map_err(crate::Error::new_h2));
-                    // An mpsc channel is used entirely to detect when the
-                    // 'Client' has been dropped. This is to get around a bug
-                    // in h2 where dropping all SendRequests won't notify a
-                    // parked Connection.
-                    let (tx, rx) = mpsc::channel(0);
-                    let (cancel_tx, cancel_rx) = oneshot::channel();
-                    let rx = rx.into_future()
-                        .map(|(msg, _)| match msg {
-                            Some(never) => match never {},
-                            None => (),
-                        })
-                        .map_err(|_| -> Never { unreachable!("mpsc cannot error") });
-                    let fut = conn
-                        .inspect(move |_| {
-                            drop(cancel_tx);
-                            trace!("connection complete")
-                        })
-                        .map_err(|e| debug!("connection error: {}", e))
-                        .select2(rx)
-                        .then(|res| match res {
-                            Ok(Either::A(((), _))) |
-                            Err(Either::A(((), _))) => {
-                                // conn has finished either way
-                                Either::A(future::ok(()))
-                            },
-                            Ok(Either::B(((), conn))) => {
-                                // mpsc has been dropped, hopefully polling
-                                // the connection some more should start shutdown
-                                // and then close
-                                trace!("send_request dropped, starting conn shutdown");
-                                Either::B(conn)
-                            }
-                            Err(Either::B((never, _))) => match never {},
-                        });
-                    self.executor.execute(fut)?;
-                    State::Ready(request_tx, tx, cancel_rx)
-                },
-                State::Ready(ref mut tx, ref conn_dropper, ref mut cancel_rx) => {
-                    match tx.poll_ready() {
-                        Ok(Async::Ready(())) => (),
-                        Ok(Async::NotReady) => return Ok(Async::NotReady),
-                        Err(err) => {
-                            return if err.reason() == Some(::h2::Reason::NO_ERROR) {
-                                trace!("connection gracefully shutdown");
-                                Ok(Async::Ready(Dispatched::Shutdown))
-                            } else {
-                                Err(crate::Error::new_h2(err))
-                            };
-                        }
-                    }
-                    match self.rx.poll() {
-                        Ok(Async::Ready(Some((req, cb)))) => {
-                            // check that future hasn't been canceled already
-                            if cb.is_canceled() {
-                                trace!("request callback is canceled");
-                                continue;
-                            }
-                            let (head, body) = req.into_parts();
-                            let mut req = ::http::Request::from_parts(head, ());
-                            super::strip_connection_headers(req.headers_mut(), true);
-                            if let Some(len) = body.content_length() {
-                                headers::set_content_length_if_missing(req.headers_mut(), len);
-                            }
-                            let eos = body.is_end_stream();
-                            let (fut, body_tx) = match tx.send_request(req, eos) {
-                                Ok(ok) => ok,
-                                Err(err) => {
-                                    debug!("client send request error: {}", err);
-                                    cb.send(Err((crate::Error::new_h2(err), None)));
-                                    continue;
-                                }
-                            };
-                            if !eos {
-                                let mut pipe = PipeToSendStream::new(body, body_tx)
-                                    .map_err(|e| debug!("client request body error: {}", e));
-                                // eagerly see if the body pipe is ready and
-                                // can thus skip allocating in the executor
-                                match pipe.poll() {
-                                    Ok(Async::Ready(())) | Err(()) => (),
-                                    Ok(Async::NotReady) => {
-                                        let conn_drop_ref = conn_dropper.clone();
-                                        let pipe = pipe.then(move |x| {
-                                            drop(conn_drop_ref);
-                                            x
-                                        });
-                                        self.executor.execute(pipe)?;
-                                    }
-                                }
-                            }
-                            let fut = fut
-                                .then(move |result| {
-                                    match result {
-                                        Ok(res) => {
-                                            let content_length = content_length_parse_all(res.headers());
-                                            let res = res.map(|stream|
-                                                crate::Body::h2(stream, content_length));
-                                            Ok(res)
-                                        },
-                                        Err(err) => {
-                                            debug!("client response error: {}", err);
-                                            Err((crate::Error::new_h2(err), None))
-                                        }
-                                    }
-                                });
-                            self.executor.execute(cb.send_when(fut))?;
-                            continue;
-                        },
-                        Ok(Async::NotReady) => {
-                            match cancel_rx.poll() {
-                                Ok(Async::Ready(never)) => match never {},
-                                Ok(Async::NotReady) => return Ok(Async::NotReady),
-                                Err(_conn_is_eof) => {
-                                    trace!("connection task is closed, closing dispatch task");
-                                    return Ok(Async::Ready(Dispatched::Shutdown));
-                                }
-                            }
-                        },
-                        Ok(Async::Ready(None)) => {
-                            trace!("client::dispatch::Sender dropped");
-                            return Ok(Async::Ready(Dispatched::Shutdown));
-                        },
-                        Err(never) => match never {},
-                    }
-                },
-            };
-            self.state = next;
-        }
-        */
+pub(crate) async fn handshake<T, B>(
+    io: T,
+    req_rx: ClientRx<B>,
+    builder: &Builder,
+    exec: Exec,
+) -> crate::Result<ClientTask<B>>
+where
+    T: AsyncRead + AsyncWrite + Send + Unpin + 'static,
+    B: Payload,
+    B::Data: Unpin,
+{
+    let (h2_tx, conn) = builder
+        .handshake::<_, SendBuf<B::Data>>(io)
+        .await
+        .map_err(crate::Error::new_h2)?;
+
+    // An mpsc channel is used entirely to detect when the
+    // 'Client' has been dropped. This is to get around a bug
+    // in h2 where dropping all SendRequests won't notify a
+    // parked Connection.
+    let (conn_drop_ref, rx) = mpsc::channel(1);
+    let (cancel_tx, conn_eof) = oneshot::channel();
+
+    let conn_drop_rx = rx.into_future()
+        .map(|(item, _rx)| {
+            match item {
+                Some(never) => match never {},
+                None => (),
+            }
+        });
+
+    let conn = conn.map_err(|e| debug!("connection error: {}", e));
+
+    let conn_task = async move {
+        match future::select(conn, conn_drop_rx).await {
+            Either::Left(_) => {
+                // ok or err, the `conn` has finished
+            }
+            Either::Right(((), conn)) => {
+                // mpsc has been dropped, hopefully polling
+                // the connection some more should start shutdown
+                // and then close
+                trace!("send_request dropped, starting conn shutdown");
+                drop(cancel_tx);
+                let _ = conn.await;
+            }
+        }
+    };
+
+    exec.execute(conn_task)?;
+
+    Ok(ClientTask {
+        conn_drop_ref,
+        conn_eof,
+        executor: exec,
+        h2_tx,
+        req_rx,
+    })
+}
+
+pub(crate) struct ClientTask<B>
+where
+    B: Payload,
+{
+    conn_drop_ref: ConnDropRef,
+    conn_eof: ConnEof,
+    executor: Exec,
+    h2_tx: SendRequest<SendBuf<B::Data>>,
+    req_rx: ClientRx<B>,
+}
+
+impl<B> Future for ClientTask<B>
+where
+    B: Payload + Unpin + 'static,
+    B::Data: Unpin,
+{
+    type Output = crate::Result<Dispatched>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
+        loop {
+            match ready!(self.h2_tx.poll_ready(cx)) {
+                Ok(()) => (),
+                Err(err) => {
+                    return if err.reason() == Some(::h2::Reason::NO_ERROR) {
+                        trace!("connection gracefully shutdown");
+                        Poll::Ready(Ok(Dispatched::Shutdown))
+                    } else {
+                        Poll::Ready(Err(crate::Error::new_h2(err)))
+                    };
+                }
+            };
+
+            match Pin::new(&mut self.req_rx).poll_next(cx) {
+                Poll::Ready(Some((req, cb))) => {
+                    // check that future hasn't been canceled already
+                    if cb.is_canceled() {
+                        trace!("request callback is canceled");
+                        continue;
+                    }
+                    let (head, body) = req.into_parts();
+                    let mut req = ::http::Request::from_parts(head, ());
+                    super::strip_connection_headers(req.headers_mut(), true);
+                    if let Some(len) = body.content_length() {
+                        headers::set_content_length_if_missing(req.headers_mut(), len);
+                    }
+                    let eos = body.is_end_stream();
+                    let (fut, body_tx) = match self.h2_tx.send_request(req, eos) {
+                        Ok(ok) => ok,
+                        Err(err) => {
+                            debug!("client send request error: {}", err);
+                            cb.send(Err((crate::Error::new_h2(err), None)));
+                            continue;
+                        }
+                    };
+
+                    if !eos {
+                        let mut pipe = PipeToSendStream::new(body, body_tx)
+                            .map(|res| {
+                                if let Err(e) = res {
+                                    debug!("client request body error: {}", e);
+                                }
+                            });
+
+                        // eagerly see if the body pipe is ready and
+                        // can thus skip allocating in the executor
+                        match Pin::new(&mut pipe).poll(cx) {
+                            Poll::Ready(_) => (),
+                            Poll::Pending => {
+                                let conn_drop_ref = self.conn_drop_ref.clone();
+                                let pipe = pipe.map(move |x| {
+                                    drop(conn_drop_ref);
+                                    x
+                                });
+                                self.executor.execute(pipe)?;
+                            }
+                        }
+                    }
+
+                    let fut = fut
+                        .map(move |result| {
+                            match result {
+                                Ok(res) => {
+                                    let content_length = content_length_parse_all(res.headers());
+                                    let res = res.map(|stream|
+                                        crate::Body::h2(stream, content_length));
+                                    Ok(res)
+                                },
+                                Err(err) => {
+                                    debug!("client response error: {}", err);
+                                    Err((crate::Error::new_h2(err), None))
+                                }
+                            }
+                        });
+
+                    self.executor.execute(cb.send_when(fut))?;
+                    continue;
+                },
+
+                Poll::Ready(None) => {
+                    trace!("client::dispatch::Sender dropped");
+                    return Poll::Ready(Ok(Dispatched::Shutdown));
+                }
+
+                Poll::Pending => {
+                    match ready!(Pin::new(&mut self.conn_eof).poll(cx)) {
+                        Ok(never) => match never {},
+                        Err(_conn_is_eof) => {
+                            trace!("connection task is closed, closing dispatch task");
+                            return Poll::Ready(Ok(Dispatched::Shutdown));
+                        }
+                    }
+                },
+            }
+        }
     }
 }
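The new `handshake` keeps the old trick described in the comments above: an `mpsc` channel whose senders are only ever dropped (never written to) signals when every request handle is gone, and a `oneshot` lets the dispatch task notice the connection task ending. A self-contained sketch of that coordination pattern using the `futures` 0.3 crate (simplified; these are not hyper's actual types):

```rust
use futures::channel::{mpsc, oneshot};
use futures::future::{self, Either};
use futures::StreamExt;

#[derive(Debug)]
enum Never {}

fn main() {
    futures::executor::block_on(async {
        // The sender half is only cloned and dropped; a message is never sent.
        // When every clone is gone, `rx.into_future()` resolves with `None`.
        let (drop_ref, rx) = mpsc::channel::<Never>(1);
        let (cancel_tx, cancel_rx) = oneshot::channel::<Never>();

        // Stand-in for the h2 connection future.
        let conn = future::pending::<()>();
        let all_handles_dropped = rx.into_future();

        // Simulate the dispatcher going away.
        drop(drop_ref);

        match future::select(Box::pin(conn), all_handles_dropped).await {
            Either::Left(_) => println!("connection finished first"),
            Either::Right(((_none, _rx), _conn)) => {
                println!("all handles dropped; begin connection shutdown");
                drop(cancel_tx);
            }
        }

        // The other task can detect the shutdown via the closed oneshot.
        assert!(cancel_rx.await.is_err());
    });
}
```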


@@ -1,5 +1,4 @@
 use bytes::Buf;
-//use futures::{Async, Future, Poll};
 use h2::{SendStream};
 use http::header::{
     HeaderName, CONNECTION, PROXY_AUTHENTICATE, PROXY_AUTHORIZATION, TE, TRAILER,
@@ -8,11 +7,12 @@ use http::header::{
 use http::HeaderMap;

 use crate::body::Payload;
+use crate::common::{Future, Pin, Poll, task};

-mod client;
+pub(crate) mod client;
 pub(crate) mod server;

-pub(crate) use self::client::Client;
+pub(crate) use self::client::ClientTask;
 pub(crate) use self::server::Server;

 fn strip_connection_headers(headers: &mut HeaderMap, is_request: bool) {
@@ -106,17 +106,13 @@ where
     }
 }

-/*
 impl<S> Future for PipeToSendStream<S>
 where
-    S: Payload,
+    S: Payload + Unpin,
 {
-    type Item = ();
-    type Error = crate::Error;
+    type Output = crate::Result<()>;

-    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
-        unimplemented!("impl Future for PipeToSendStream");
-        /*
+    fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
         loop {
             if !self.data_done {
                 // we don't have the next chunk of data yet, so just reserve 1 byte to make
@@ -126,23 +122,25 @@ where
                 if self.body_tx.capacity() == 0 {
                     loop {
-                        match try_ready!(self.body_tx.poll_capacity().map_err(crate::Error::new_body_write)) {
-                            Some(0) => {}
-                            Some(_) => break,
-                            None => return Err(crate::Error::new_canceled()),
+                        match ready!(self.body_tx.poll_capacity(cx)) {
+                            Some(Ok(0)) => {},
+                            Some(Ok(_)) => break,
+                            Some(Err(e)) => return Poll::Ready(Err(crate::Error::new_body_write(e))),
+                            None => return Poll::Ready(Err(crate::Error::new_canceled())),
                         }
                     }
                 } else {
-                    if let Async::Ready(reason) =
-                        self.body_tx.poll_reset().map_err(crate::Error::new_body_write)?
+                    if let Poll::Ready(reason) =
+                        self.body_tx.poll_reset(cx).map_err(crate::Error::new_body_write)?
                     {
                         debug!("stream received RST_STREAM: {:?}", reason);
-                        return Err(crate::Error::new_body_write(::h2::Error::from(reason)));
+                        return Poll::Ready(Err(crate::Error::new_body_write(::h2::Error::from(reason))));
                     }
                 }

-                match try_ready!(self.stream.poll_data().map_err(|e| self.on_user_err(e))) {
-                    Some(chunk) => {
+                match ready!(Pin::new(&mut self.stream).poll_data(cx)) {
+                    Some(Ok(chunk)) => {
                         let is_eos = self.stream.is_end_stream();
                         trace!(
                             "send body chunk: {} bytes, eos={}",
@@ -156,14 +154,15 @@ where
                             .map_err(crate::Error::new_body_write)?;

                         if is_eos {
-                            return Ok(Async::Ready(()));
+                            return Poll::Ready(Ok(()));
                         }
                     }
+                    Some(Err(e)) => return Poll::Ready(Err(self.on_user_err(e))),
                     None => {
                         self.body_tx.reserve_capacity(0);
                         let is_eos = self.stream.is_end_stream();

                         if is_eos {
-                            return self.send_eos_frame().map(Async::Ready);
+                            return Poll::Ready(self.send_eos_frame());
                         } else {
                             self.data_done = true;
                             // loop again to poll_trailers
@@ -171,31 +170,30 @@ where
                         }
                     }
                 }
             } else {
-                if let Async::Ready(reason) =
-                    self.body_tx.poll_reset().map_err(|e| crate::Error::new_body_write(e))?
+                if let Poll::Ready(reason) =
+                    self.body_tx.poll_reset(cx).map_err(|e| crate::Error::new_body_write(e))?
                 {
                     debug!("stream received RST_STREAM: {:?}", reason);
-                    return Err(crate::Error::new_body_write(::h2::Error::from(reason)));
+                    return Poll::Ready(Err(crate::Error::new_body_write(::h2::Error::from(reason))));
                 }

-                match try_ready!(self.stream.poll_trailers().map_err(|e| self.on_user_err(e))) {
-                    Some(trailers) => {
+                match ready!(Pin::new(&mut self.stream).poll_trailers(cx)) {
+                    Some(Ok(trailers)) => {
                         self.body_tx
                             .send_trailers(trailers)
                             .map_err(crate::Error::new_body_write)?;
-                        return Ok(Async::Ready(()));
+                        return Poll::Ready(Ok(()));
                     }
+                    Some(Err(e)) => return Poll::Ready(Err(self.on_user_err(e))),
                     None => {
                         // There were no trailers, so send an empty DATA frame...
-                        return self.send_eos_frame().map(Async::Ready);
+                        return Poll::Ready(self.send_eos_frame());
                     }
                 }
             }
         }
-        */
     }
 }
-*/

 struct SendBuf<B>(Option<B>);
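Throughout this file the futures 0.1 idioms (`Async::Ready`, `try_ready!`, two-parameter `Poll<T, E>`) are mechanically translated to std's `Poll<Result<...>>` plus a `ready!` macro. A self-contained sketch of the same translation on a toy future, unrelated to hyper (it assumes the `futures` crate only for a simple executor):

```rust
use std::future::Future;
use std::pin::Pin;
use std::task::{ready, Context, Poll};

/// Wraps an inner future and logs when it completes, written in the
/// std-future style this commit migrates to.
struct Logged<F> {
    inner: F,
}

impl<F: Future + Unpin> Future for Logged<F> {
    type Output = F::Output;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // `ready!` plays the role of futures 0.1's `try_ready!`: it returns
        // `Poll::Pending` early, so the code below only runs on completion.
        let output = ready!(Pin::new(&mut self.inner).poll(cx));
        println!("inner future finished");
        Poll::Ready(output)
    }
}

fn main() {
    let out = futures::executor::block_on(Logged { inner: std::future::ready(42) });
    assert_eq!(out, 42);
}
```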


@@ -1,5 +1,7 @@
 use std::error::Error as StdError;
+use std::marker::Unpin;

+use futures_core::Stream;
 use h2::Reason;
 use h2::server::{Builder, Connection, Handshake, SendResponse};
 use tokio_io::{AsyncRead, AsyncWrite};
@@ -49,27 +51,23 @@ where
 impl<T, S, B, E> Server<T, S, B, E>
 where
-    T: AsyncRead + AsyncWrite,
+    T: AsyncRead + AsyncWrite + Unpin,
     S: Service<ReqBody=Body, ResBody=B>,
     S::Error: Into<Box<dyn StdError + Send + Sync>>,
     B: Payload,
+    B::Data: Unpin,
     E: H2Exec<S::Future, B>,
 {
     pub(crate) fn new(io: T, service: S, builder: &Builder, exec: E) -> Server<T, S, B, E> {
-        unimplemented!("proto::h2::Server::new")
-        /*
         let handshake = builder.handshake(io);
         Server {
             exec,
             state: State::Handshaking(handshake),
             service,
         }
-        */
     }

     pub fn graceful_shutdown(&mut self) {
-        unimplemented!("proto::h2::Server::graceful_shutdown")
-        /*
         trace!("graceful_shutdown");
         match self.state {
             State::Handshaking(..) => {
@@ -86,54 +84,53 @@ where
             }
         }
         self.state = State::Closed;
-        */
     }
 }

 impl<T, S, B, E> Future for Server<T, S, B, E>
 where
-    T: AsyncRead + AsyncWrite,
+    T: AsyncRead + AsyncWrite + Unpin,
     S: Service<ReqBody=Body, ResBody=B>,
     S::Error: Into<Box<dyn StdError + Send + Sync>>,
     B: Payload,
+    B::Data: Unpin,
     E: H2Exec<S::Future, B>,
 {
     type Output = crate::Result<Dispatched>;

     fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
-        unimplemented!("h2 server future")
-        /*
+        let me = &mut *self;
         loop {
-            let next = match self.state {
+            let next = match me.state {
                 State::Handshaking(ref mut h) => {
-                    let conn = try_ready!(h.poll().map_err(crate::Error::new_h2));
+                    let conn = ready!(Pin::new(h).poll(cx).map_err(crate::Error::new_h2))?;
                     State::Serving(Serving {
                         conn,
                         closing: None,
                     })
                 },
                 State::Serving(ref mut srv) => {
-                    try_ready!(srv.poll_server(&mut self.service, &self.exec));
-                    return Ok(Async::Ready(Dispatched::Shutdown));
+                    ready!(srv.poll_server(cx, &mut me.service, &mut me.exec))?;
+                    return Poll::Ready(Ok(Dispatched::Shutdown));
                 }
                 State::Closed => {
                     // graceful_shutdown was called before handshaking finished,
                     // nothing to do here...
-                    return Ok(Async::Ready(Dispatched::Shutdown));
+                    return Poll::Ready(Ok(Dispatched::Shutdown));
                 }
             };
-            self.state = next;
+            me.state = next;
         }
-        */
     }
 }

 impl<T, B> Serving<T, B>
 where
-    T: AsyncRead + AsyncWrite,
+    T: AsyncRead + AsyncWrite + Unpin,
     B: Payload,
+    B::Data: Unpin,
 {
-    fn poll_server<S, E>(&mut self, service: &mut S, exec: &E) -> Poll<crate::Result<()>>
+    fn poll_server<S, E>(&mut self, cx: &mut task::Context<'_>, service: &mut S, exec: &mut E) -> Poll<crate::Result<()>>
     where
         S: Service<
             ReqBody=Body,
@@ -142,19 +139,18 @@ where
         S::Error: Into<Box<dyn StdError + Send + Sync>>,
         E: H2Exec<S::Future, B>,
     {
-        /*
         if self.closing.is_none() {
             loop {
                 // At first, polls the readiness of supplied service.
-                match service.poll_ready() {
-                    Ok(Async::Ready(())) => (),
-                    Ok(Async::NotReady) => {
+                match service.poll_ready(cx) {
+                    Poll::Ready(Ok(())) => (),
+                    Poll::Pending => {
                         // use `poll_close` instead of `poll`, in order to avoid accepting a request.
-                        try_ready!(self.conn.poll_close().map_err(crate::Error::new_h2));
+                        ready!(self.conn.poll_close(cx).map_err(crate::Error::new_h2))?;
                         trace!("incoming connection complete");
-                        return Ok(Async::Ready(()));
+                        return Poll::Ready(Ok(()));
                     }
-                    Err(err) => {
+                    Poll::Ready(Err(err)) => {
                         let err = crate::Error::new_user_service(err);
                         debug!("service closed: {}", err);
@@ -173,29 +169,33 @@ where
                     }
                 }

                 // When the service is ready, accepts an incoming request.
-                if let Some((req, respond)) = try_ready!(self.conn.poll().map_err(crate::Error::new_h2)) {
-                    trace!("incoming request");
-                    let content_length = content_length_parse_all(req.headers());
-                    let req = req.map(|stream| {
-                        crate::Body::h2(stream, content_length)
-                    });
-                    let fut = H2Stream::new(service.call(req), respond);
-                    exec.execute_h2stream(fut)?;
-                } else {
-                    // no more incoming streams...
-                    trace!("incoming connection complete");
-                    return Ok(Async::Ready(()))
+                match ready!(Pin::new(&mut self.conn).poll_next(cx)) {
+                    Some(Ok((req, respond))) => {
+                        trace!("incoming request");
+                        let content_length = content_length_parse_all(req.headers());
+                        let req = req.map(|stream| {
+                            crate::Body::h2(stream, content_length)
+                        });
+                        let fut = H2Stream::new(service.call(req), respond);
+                        exec.execute_h2stream(fut)?;
+                    },
+                    Some(Err(e)) => {
+                        return Poll::Ready(Err(crate::Error::new_h2(e)));
+                    },
+                    None => {
+                        // no more incoming streams...
+                        trace!("incoming connection complete");
+                        return Poll::Ready(Ok(()));
+                    },
                 }
             }
         }

         debug_assert!(self.closing.is_some(), "poll_server broke loop without closing");

-        try_ready!(self.conn.poll_close().map_err(crate::Error::new_h2));
-        Err(self.closing.take().expect("polled after error"))
-        */
-        unimplemented!("h2 server poll_server")
+        ready!(self.conn.poll_close(cx).map_err(crate::Error::new_h2))?;
+        Poll::Ready(Err(self.closing.take().expect("polled after error")))
     }
 }
@@ -230,38 +230,37 @@ where
     }
 }

-impl<F, B> Future for H2Stream<F, B>
+impl<F, B, E> H2Stream<F, B>
 where
-    //F: Future<Item=Response<B>>,
-    //F::Error: Into<Box<dyn StdError + Send + Sync>>,
-    B: Payload,
+    F: Future<Output = Result<Response<B>, E>>,
+    B: Payload + Unpin,
+    B::Data: Unpin,
+    E: Into<Box<dyn StdError + Send + Sync>>,
 {
-    type Output = ();
-
-    fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
-        unimplemented!("impl Future for H2Stream");
-        /*
+    fn poll2(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
+        // Safety: State::{Service, Body} futures are never moved
+        let me = unsafe { self.get_unchecked_mut() };
         loop {
-            let next = match self.state {
+            let next = match me.state {
                 H2StreamState::Service(ref mut h) => {
-                    let res = match h.poll() {
-                        Ok(Async::Ready(r)) => r,
-                        Ok(Async::NotReady) => {
-                            // Body is not yet ready, so we want to check if the client has sent a
+                    let res = match unsafe { Pin::new_unchecked(h) }.poll(cx) {
+                        Poll::Ready(Ok(r)) => r,
+                        Poll::Pending => {
+                            // Response is not yet ready, so we want to check if the client has sent a
                             // RST_STREAM frame which would cancel the current request.
-                            if let Async::Ready(reason) =
-                                self.reply.poll_reset().map_err(|e| crate::Error::new_h2(e))?
+                            if let Poll::Ready(reason) =
+                                me.reply.poll_reset(cx).map_err(|e| crate::Error::new_h2(e))?
                             {
                                 debug!("stream received RST_STREAM: {:?}", reason);
-                                return Err(crate::Error::new_h2(reason.into()));
+                                return Poll::Ready(Err(crate::Error::new_h2(reason.into())));
                             }
-                            return Ok(Async::NotReady);
+                            return Poll::Pending;
                         }
-                        Err(e) => {
+                        Poll::Ready(Err(e)) => {
                             let err = crate::Error::new_user_service(e);
                             warn!("http2 service errored: {}", err);
-                            self.reply.send_reset(err.h2_reason());
-                            return Err(err);
+                            me.reply.send_reset(err.h2_reason());
+                            return Poll::Ready(Err(err));
                         },
                     };
@@ -278,12 +277,12 @@ where
                     macro_rules! reply {
                         ($eos:expr) => ({
-                            match self.reply.send_response(res, $eos) {
+                            match me.reply.send_response(res, $eos) {
                                 Ok(tx) => tx,
                                 Err(e) => {
                                     debug!("send response error: {}", e);
-                                    self.reply.send_reset(Reason::INTERNAL_ERROR);
-                                    return Err(crate::Error::new_h2(e));
+                                    me.reply.send_reset(Reason::INTERNAL_ERROR);
+                                    return Poll::Ready(Err(crate::Error::new_h2(e)));
                                 }
                             }
                         })
@@ -300,7 +299,7 @@ where
                             body_tx
                                 .send_data(buf, true)
                                 .map_err(crate::Error::new_body_write)?;
-                            return Ok(Async::Ready(()));
+                            return Poll::Ready(Ok(()));
                         }

                         if !body.is_end_stream() {
@@ -308,32 +307,32 @@ where
                             H2StreamState::Body(PipeToSendStream::new(body, body_tx))
                         } else {
                             reply!(true);
-                            return Ok(Async::Ready(()));
+                            return Poll::Ready(Ok(()));
                         }
                     },
                     H2StreamState::Body(ref mut pipe) => {
-                        return pipe.poll();
+                        return Pin::new(pipe).poll(cx);
                     }
                 };
-                self.state = next;
+                me.state = next;
             }
-            */
         }
     }

-/*
-impl<F, B> Future for H2Stream<F, B>
+impl<F, B, E> Future for H2Stream<F, B>
 where
-    F: Future<Item=Response<B>>,
-    F::Error: Into<Box<dyn StdError + Send + Sync>>,
-    B: Payload,
+    F: Future<Output = Result<Response<B>, E>>,
+    B: Payload + Unpin,
+    B::Data: Unpin,
+    E: Into<Box<dyn StdError + Send + Sync>>,
 {
-    type Item = ();
-    type Error = ();
+    type Output = ();

-    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
-        self.poll2()
-            .map_err(|e| debug!("stream error: {}", e))
+    fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
+        self.poll2(cx).map(|res| {
+            if let Err(e) = res {
+                debug!("stream error: {}", e);
+            }
+        })
     }
 }
-*/
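`H2Stream::poll2` switches to manual pin projection: the state enum holds the service's response future, which may be `!Unpin`, so it is polled through `Pin::new_unchecked` under the documented invariant that the state is never moved out of the pinned wrapper. A self-contained sketch of the same technique on a toy wrapper (names invented for illustration; the `futures` crate is assumed only for `block_on`):

```rust
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

/// Wraps a possibly-!Unpin future and counts how many times it was polled.
struct Counted<F> {
    polls: u32,
    inner: F,
}

impl<F: Future> Future for Counted<F> {
    type Output = (u32, F::Output);

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Safety: `inner` is never moved out of `self` after being pinned;
        // we only re-pin a reference to it, mirroring the "futures are never
        // moved" invariant noted in the diff above.
        let me = unsafe { self.get_unchecked_mut() };
        me.polls += 1;
        let inner = unsafe { Pin::new_unchecked(&mut me.inner) };
        match inner.poll(cx) {
            Poll::Ready(out) => Poll::Ready((me.polls, out)),
            Poll::Pending => Poll::Pending,
        }
    }
}

fn main() {
    // Async blocks are !Unpin in general; Counted still works because the
    // projection above never moves the inner future.
    let fut = Counted { polls: 0, inner: async { 21 * 2 } };
    let (polls, value) = futures::executor::block_on(fut);
    assert_eq!((polls, value), (1, 42));
}
```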


@@ -358,6 +358,7 @@ impl<E> Http<E> {
         S: Service<ReqBody=Body, ResBody=Bd>,
         S::Error: Into<Box<dyn StdError + Send + Sync>>,
         Bd: Payload,
+        Bd::Data: Unpin,
         I: AsyncRead + AsyncWrite + Unpin,
         E: H2Exec<S::Future, Bd>,
     {
@@ -479,6 +480,7 @@ where
     S::Error: Into<Box<dyn StdError + Send + Sync>>,
     I: AsyncRead + AsyncWrite + Unpin,
     B: Payload + 'static,
+    B::Data: Unpin,
     E: H2Exec<S::Future, B>,
 {
     /// Start a graceful shutdown process for this connection.
@@ -549,7 +551,7 @@ where
         loop {
             let polled = match *self.conn.as_mut().unwrap() {
                 Either::A(ref mut h1) => h1.poll_without_shutdown(cx),
-                Either::B(ref mut h2) => unimplemented!("Connection::poll_without_shutdown h2"),//return h2.poll().map(|x| x.map(|_| ())),
+                Either::B(ref mut h2) => return Pin::new(h2).poll(cx).map_ok(|_| ()),
             };
             match ready!(polled) {
                 Ok(x) => return Poll::Ready(Ok(x)),
@@ -629,6 +631,7 @@ where
     S::Error: Into<Box<dyn StdError + Send + Sync>>,
     I: AsyncRead + AsyncWrite + Unpin + 'static,
     B: Payload + 'static,
+    B::Data: Unpin,
     E: H2Exec<S::Future, B>,
 {
     type Output = crate::Result<()>;
@@ -744,6 +747,7 @@ where
     F: Future<Output=Result<S, FE>>,
     S: Service<ReqBody=Body, ResBody=B>,
     B: Payload,
+    B::Data: Unpin,
     E: H2Exec<S::Future, B>,
 {
     type Output = Result<Connection<I, S, E>, FE>;
@@ -852,6 +856,7 @@ pub(crate) mod spawn_all {
     where
         I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
         S: Service<ReqBody=Body> + 'static,
+        <S::ResBody as Payload>::Data: Unpin,
         E: H2Exec<S::Future, S::ResBody>,
     {
         type Future = UpgradeableConnection<I, S, E>;
@@ -895,6 +900,7 @@ pub(crate) mod spawn_all {
         NE: Into<Box<dyn StdError + Send + Sync>>,
         S: Service<ReqBody=Body, ResBody=B>,
         B: Payload,
+        B::Data: Unpin,
         E: H2Exec<S::Future, B>,
         W: Watcher<I, S, E>,
     {
@@ -960,6 +966,7 @@ mod upgrades {
         S::Error: Into<Box<dyn StdError + Send + Sync>>,
         I: AsyncRead + AsyncWrite + Unpin,
         B: Payload + 'static,
+        B::Data: Unpin,
         E: H2Exec<S::Future, B>,
     {
         /// Start a graceful shutdown process for this connection.
@@ -977,6 +984,7 @@ mod upgrades {
         S::Error: Into<Box<dyn StdError + Send + Sync>>,
         I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
         B: Payload + 'static,
+        B::Data: Unpin,
         E: super::H2Exec<S::Future, B>,
     {
         type Output = crate::Result<()>;
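The recurring `B::Data: Unpin` bounds appear to follow from the new poll code wrapping body data in `Pin::new`; for typical users they cost nothing, since common buffer types are `Unpin`. A tiny, std-only compile-time check of the kind of bound being added (the concrete types here are just examples, not part of this commit):

```rust
// Zero-cost, compile-time assertion of the bound these hunks introduce.
fn assert_unpin<T: Unpin>() {}

fn main() {
    // Common body buffer types satisfy `Unpin` automatically, so the added
    // `B::Data: Unpin` bounds do not restrict typical hyper users.
    assert_unpin::<Vec<u8>>();
    assert_unpin::<&'static [u8]>();
}
```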


@@ -154,6 +154,7 @@ where
     S::Error: Into<Box<dyn StdError + Send + Sync>>,
     S::Service: 'static,
     B: Payload,
+    B::Data: Unpin,
     E: H2Exec<<S::Service as Service>::Future, B>,
     E: NewSvcExec<IO, S::Future, S::Service, E, GracefulWatcher>,
 {
@@ -211,6 +212,7 @@ where
     S::Error: Into<Box<dyn StdError + Send + Sync>>,
     S::Service: 'static,
     B: Payload,
+    B::Data: Unpin,
     E: H2Exec<<S::Service as Service>::Future, B>,
     E: NewSvcExec<IO, S::Future, S::Service, E, NoopWatcher>,
 {
@@ -409,6 +411,7 @@ impl<I, E> Builder<I, E> {
         S::Error: Into<Box<dyn StdError + Send + Sync>>,
         S::Service: 'static,
         B: Payload,
+        B::Data: Unpin,
         E: NewSvcExec<IO, S::Future, S::Service, E, NoopWatcher>,
         E: H2Exec<<S::Service as Service>::Future, B>,
     {


@@ -47,6 +47,7 @@ where
     S::Service: 'static,
     S::Error: Into<Box<dyn StdError + Send + Sync>>,
     B: Payload,
+    B::Data: Unpin,
     F: Future<Output=()>,
     E: H2Exec<<S::Service as Service>::Future, B>,
     E: NewSvcExec<IO, S::Future, S::Service, E, GracefulWatcher>,
@@ -98,6 +99,7 @@ impl<I, S, E> Watcher<I, S, E> for GracefulWatcher
 where
     I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
     S: Service<ReqBody=Body> + 'static,
+    <S::ResBody as Payload>::Data: Unpin,
     E: H2Exec<S::Future, S::ResBody>,
 {
     type Future = Watching<UpgradeableConnection<I, S, E>, fn(Pin<&mut UpgradeableConnection<I, S, E>>)>;
@@ -116,6 +118,7 @@ where
     S::Error: Into<Box<dyn StdError + Send + Sync>>,
     I: AsyncRead + AsyncWrite + Unpin,
     S::ResBody: Payload + 'static,
+    <S::ResBody as Payload>::Data: Unpin,
     E: H2Exec<S::Future, S::ResBody>,
 {
     conn.graceful_shutdown()