refactor(http2): re-enable http2 client and server support

Sean McArthur
2019-08-19 14:03:05 -07:00
parent 4920f5e264
commit 41f4173615
11 changed files with 323 additions and 369 deletions
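
Most of the diff below is a mechanical port of the previously commented-out futures 0.1 code to the std::future traits: poll(&mut self) -> Poll<Item, Error> becomes poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Output>, Async::Ready / Async::NotReady become Poll::Ready / Poll::Pending, and try_ready! becomes ready! plus explicit Result handling. As a reading aid, here is a minimal standalone sketch of the new trait shape; the Logged wrapper is purely illustrative and is not part of hyper.

    // Old (futures 0.1): fn poll(&mut self) -> Poll<Self::Item, Self::Error>
    // New (std::future): fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output>
    use std::future::Future;
    use std::pin::Pin;
    use std::task::{Context, Poll};

    // Illustrative wrapper: polls an inner future and reports when it resolves.
    struct Logged<F> {
        inner: F,
    }

    impl<F: Future + Unpin> Future for Logged<F> {
        type Output = F::Output;

        fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
            // The ready!(...) macro used throughout the new code expands to
            // roughly this match: bail out with Poll::Pending, or continue
            // with the resolved value.
            match Pin::new(&mut self.inner).poll(cx) {
                Poll::Pending => Poll::Pending,
                Poll::Ready(value) => {
                    println!("inner future resolved");
                    Poll::Ready(value)
                }
            }
        }
    }

The same signature change repeats in every impl Future touched in the three files shown here.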

View File

@@ -1,8 +1,11 @@
use bytes::IntoBuf;
//use futures::{Async, Future, Poll, Stream};
use futures_channel::{mpsc, oneshot};
use futures_util::future::{self, FutureExt as _, Either};
use futures_util::stream::StreamExt as _;
use futures_util::try_future::TryFutureExt as _;
//use futures::future::{self, Either};
//use futures::sync::{mpsc, oneshot};
use h2::client::{Builder, Handshake, SendRequest};
use h2::client::{Builder, SendRequest};
use tokio_io::{AsyncRead, AsyncWrite};
use crate::headers::content_length_parse_all;
@@ -14,192 +17,187 @@ use super::{PipeToSendStream, SendBuf};
use crate::{Body, Request, Response};
type ClientRx<B> = crate::client::dispatch::Receiver<Request<B>, Response<Body>>;
///// An mpsc channel is used to help notify the `Connection` task when *all*
///// other handles to it have been dropped, so that it can shutdown.
//type ConnDropRef = mpsc::Sender<Never>;
type ConnDropRef = mpsc::Sender<Never>;
///// A oneshot channel watches the `Connection` task, and when it completes,
///// the "dispatch" task will be notified and can shutdown sooner.
//type ConnEof = oneshot::Receiver<Never>;
type ConnEof = oneshot::Receiver<Never>;
pub(crate) struct Client<T, B>
pub(crate) async fn handshake<T, B>(
io: T,
req_rx: ClientRx<B>,
builder: &Builder,
exec: Exec,
) -> crate::Result<ClientTask<B>>
where
T: AsyncRead + AsyncWrite + Send + Unpin + 'static,
B: Payload,
B::Data: Unpin,
{
executor: Exec,
rx: ClientRx<B>,
state: State<T, SendBuf<B::Data>>,
}
let (h2_tx, conn) = builder
.handshake::<_, SendBuf<B::Data>>(io)
.await
.map_err(crate::Error::new_h2)?;
enum State<T, B> where B: IntoBuf {
Handshaking(Handshake<T, B>),
//Ready(SendRequest<B>, ConnDropRef, ConnEof),
}
// An mpsc channel is used entirely to detect when the
// 'Client' has been dropped. This is to get around a bug
// in h2 where dropping all SendRequests won't notify a
// parked Connection.
let (conn_drop_ref, rx) = mpsc::channel(1);
let (cancel_tx, conn_eof) = oneshot::channel();
impl<T, B> Client<T, B>
where
T: AsyncRead + AsyncWrite + Send + 'static,
B: Payload,
{
pub(crate) fn new(io: T, rx: ClientRx<B>, builder: &Builder, exec: Exec) -> Client<T, B> {
unimplemented!("proto::h2::Client::new");
/*
let handshake = builder.handshake(io);
let conn_drop_rx = rx.into_future()
.map(|(item, _rx)| {
match item {
Some(never) => match never {},
None => (),
}
});
Client {
executor: exec,
rx: rx,
state: State::Handshaking(handshake),
let conn = conn.map_err(|e| debug!("connection error: {}", e));
let conn_task = async move {
match future::select(conn, conn_drop_rx).await {
Either::Left(_) => {
// ok or err, the `conn` has finished
}
Either::Right(((), conn)) => {
// mpsc has been dropped, hopefully polling
// the connection some more should start shutdown
// and then close
trace!("send_request dropped, starting conn shutdown");
drop(cancel_tx);
let _ = conn.await;
}
}
*/
}
};
exec.execute(conn_task)?;
Ok(ClientTask {
conn_drop_ref,
conn_eof,
executor: exec,
h2_tx,
req_rx,
})
}
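
The mpsc/oneshot pair created in handshake() is never used to carry messages: the mpsc sender exists only so the connection task can notice when every client handle has been dropped, and the oneshot exists only so the dispatch task can notice when the connection task ends. A self-contained sketch of that drop-detection pattern follows, assuming the futures-channel and futures-util crates already imported at the top of this file; the drop_aware_conn name and the use of std::convert::Infallible in place of hyper's Never type are illustrative, not hyper's API.

    use std::convert::Infallible;

    use futures_channel::{mpsc, oneshot};
    use futures_util::future::{self, Either, FutureExt as _};
    use futures_util::stream::StreamExt as _;

    /// Returns a spawn-able task driving `conn`, plus the two handles a
    /// dispatcher would keep: a clone-able "drop reference" and a oneshot
    /// receiver that closes once the task ends. Nothing is ever sent on
    /// either channel; only Drop and end-of-stream signals matter.
    fn drop_aware_conn<C>(
        conn: C,
    ) -> (
        impl std::future::Future<Output = ()>,
        mpsc::Sender<Infallible>,
        oneshot::Receiver<Infallible>,
    )
    where
        C: std::future::Future<Output = ()> + Unpin,
    {
        let (conn_drop_ref, drop_rx) = mpsc::channel::<Infallible>(1);
        let (cancel_tx, conn_eof) = oneshot::channel::<Infallible>();

        let task = async move {
            // Resolves once every clone of conn_drop_ref has been dropped.
            let all_handles_dropped = drop_rx.into_future().map(|_| ());

            match future::select(conn, all_handles_dropped).await {
                Either::Left(_) => {
                    // The connection finished on its own (ok or err). When the
                    // task returns, cancel_tx is dropped and conn_eof closes.
                }
                Either::Right((_, conn)) => {
                    // Every handle is gone: close conn_eof right away, then
                    // keep polling the connection so it can shut down cleanly.
                    drop(cancel_tx);
                    conn.await;
                }
            }
        };

        (task, conn_drop_ref, conn_eof)
    }

In the real handshake() above, the sender is the conn_drop_ref stored in ClientTask, and clones of it are attached to spawned body pipes so the connection outlives any request body still being written.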
impl<T, B> Future for Client<T, B>
pub(crate) struct ClientTask<B>
where
T: AsyncRead + AsyncWrite + Send + 'static,
B: Payload + 'static,
B: Payload,
{
conn_drop_ref: ConnDropRef,
conn_eof: ConnEof,
executor: Exec,
h2_tx: SendRequest<SendBuf<B::Data>>,
req_rx: ClientRx<B>,
}
impl<B> Future for ClientTask<B>
where
B: Payload + Unpin + 'static,
B::Data: Unpin,
{
type Output = crate::Result<Dispatched>;
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
unimplemented!("impl Future for proto::h2::Client");
/*
loop {
let next = match self.state {
State::Handshaking(ref mut h) => {
let (request_tx, conn) = try_ready!(h.poll().map_err(crate::Error::new_h2));
// An mpsc channel is used entirely to detect when the
// 'Client' has been dropped. This is to get around a bug
// in h2 where dropping all SendRequests won't notify a
// parked Connection.
let (tx, rx) = mpsc::channel(0);
let (cancel_tx, cancel_rx) = oneshot::channel();
let rx = rx.into_future()
.map(|(msg, _)| match msg {
Some(never) => match never {},
None => (),
})
.map_err(|_| -> Never { unreachable!("mpsc cannot error") });
let fut = conn
.inspect(move |_| {
drop(cancel_tx);
trace!("connection complete")
})
.map_err(|e| debug!("connection error: {}", e))
.select2(rx)
.then(|res| match res {
Ok(Either::A(((), _))) |
Err(Either::A(((), _))) => {
// conn has finished either way
Either::A(future::ok(()))
},
Ok(Either::B(((), conn))) => {
// mpsc has been dropped, hopefully polling
// the connection some more should start shutdown
// and then close
trace!("send_request dropped, starting conn shutdown");
Either::B(conn)
}
Err(Either::B((never, _))) => match never {},
});
self.executor.execute(fut)?;
State::Ready(request_tx, tx, cancel_rx)
},
State::Ready(ref mut tx, ref conn_dropper, ref mut cancel_rx) => {
match tx.poll_ready() {
Ok(Async::Ready(())) => (),
Ok(Async::NotReady) => return Ok(Async::NotReady),
match ready!(self.h2_tx.poll_ready(cx)) {
Ok(()) => (),
Err(err) => {
return if err.reason() == Some(::h2::Reason::NO_ERROR) {
trace!("connection gracefully shutdown");
Poll::Ready(Ok(Dispatched::Shutdown))
} else {
Poll::Ready(Err(crate::Error::new_h2(err)))
};
}
};
match Pin::new(&mut self.req_rx).poll_next(cx) {
Poll::Ready(Some((req, cb))) => {
// check that future hasn't been canceled already
if cb.is_canceled() {
trace!("request callback is canceled");
continue;
}
let (head, body) = req.into_parts();
let mut req = ::http::Request::from_parts(head, ());
super::strip_connection_headers(req.headers_mut(), true);
if let Some(len) = body.content_length() {
headers::set_content_length_if_missing(req.headers_mut(), len);
}
let eos = body.is_end_stream();
let (fut, body_tx) = match self.h2_tx.send_request(req, eos) {
Ok(ok) => ok,
Err(err) => {
return if err.reason() == Some(::h2::Reason::NO_ERROR) {
trace!("connection gracefully shutdown");
Ok(Async::Ready(Dispatched::Shutdown))
} else {
Err(crate::Error::new_h2(err))
};
debug!("client send request error: {}", err);
cb.send(Err((crate::Error::new_h2(err), None)));
continue;
}
};
if !eos {
let mut pipe = PipeToSendStream::new(body, body_tx)
.map(|res| {
if let Err(e) = res {
debug!("client request body error: {}", e);
}
});
// eagerly see if the body pipe is ready and
// can thus skip allocating in the executor
match Pin::new(&mut pipe).poll(cx) {
Poll::Ready(_) => (),
Poll::Pending => {
let conn_drop_ref = self.conn_drop_ref.clone();
let pipe = pipe.map(move |x| {
drop(conn_drop_ref);
x
});
self.executor.execute(pipe)?;
}
}
}
match self.rx.poll() {
Ok(Async::Ready(Some((req, cb)))) => {
// check that future hasn't been canceled already
if cb.is_canceled() {
trace!("request callback is canceled");
continue;
}
let (head, body) = req.into_parts();
let mut req = ::http::Request::from_parts(head, ());
super::strip_connection_headers(req.headers_mut(), true);
if let Some(len) = body.content_length() {
headers::set_content_length_if_missing(req.headers_mut(), len);
}
let eos = body.is_end_stream();
let (fut, body_tx) = match tx.send_request(req, eos) {
Ok(ok) => ok,
let fut = fut
.map(move |result| {
match result {
Ok(res) => {
let content_length = content_length_parse_all(res.headers());
let res = res.map(|stream|
crate::Body::h2(stream, content_length));
Ok(res)
},
Err(err) => {
debug!("client send request error: {}", err);
cb.send(Err((crate::Error::new_h2(err), None)));
continue;
}
};
if !eos {
let mut pipe = PipeToSendStream::new(body, body_tx)
.map_err(|e| debug!("client request body error: {}", e));
// eagerly see if the body pipe is ready and
// can thus skip allocating in the executor
match pipe.poll() {
Ok(Async::Ready(())) | Err(()) => (),
Ok(Async::NotReady) => {
let conn_drop_ref = conn_dropper.clone();
let pipe = pipe.then(move |x| {
drop(conn_drop_ref);
x
});
self.executor.execute(pipe)?;
}
debug!("client response error: {}", err);
Err((crate::Error::new_h2(err), None))
}
}
});
self.executor.execute(cb.send_when(fut))?;
continue;
},
let fut = fut
.then(move |result| {
match result {
Ok(res) => {
let content_length = content_length_parse_all(res.headers());
let res = res.map(|stream|
crate::Body::h2(stream, content_length));
Ok(res)
},
Err(err) => {
debug!("client response error: {}", err);
Err((crate::Error::new_h2(err), None))
}
}
});
self.executor.execute(cb.send_when(fut))?;
continue;
},
Poll::Ready(None) => {
trace!("client::dispatch::Sender dropped");
return Poll::Ready(Ok(Dispatched::Shutdown));
}
Ok(Async::NotReady) => {
match cancel_rx.poll() {
Ok(Async::Ready(never)) => match never {},
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(_conn_is_eof) => {
trace!("connection task is closed, closing dispatch task");
return Ok(Async::Ready(Dispatched::Shutdown));
}
}
},
Ok(Async::Ready(None)) => {
trace!("client::dispatch::Sender dropped");
return Ok(Async::Ready(Dispatched::Shutdown));
},
Err(never) => match never {},
Poll::Pending => {
match ready!(Pin::new(&mut self.conn_eof).poll(cx)) {
Ok(never) => match never {},
Err(_conn_is_eof) => {
trace!("connection task is closed, closing dispatch task");
return Poll::Ready(Ok(Dispatched::Shutdown));
}
}
},
};
self.state = next;
}
}
*/
}
}
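
One detail in the request loop above is worth calling out: when a request has a body, the PipeToSendStream future is polled once inline before being handed to the executor, so bodies that are already finished never allocate a background task. A generic sketch of that shortcut follows; poll_once_then_spawn and the spawn callback are illustrative stand-ins, not hyper's executor API.

    use std::future::Future;
    use std::pin::Pin;
    use std::task::{Context, Poll};

    /// Poll the future once; only spawn it if it is not already finished.
    fn poll_once_then_spawn<F>(mut fut: F, cx: &mut Context<'_>, spawn: impl FnOnce(F))
    where
        F: Future<Output = ()> + Unpin,
    {
        match Pin::new(&mut fut).poll(cx) {
            // Already done: nothing needs to run in the background.
            Poll::Ready(()) => {}
            // Still pending: hand it off so it keeps running on its own.
            Poll::Pending => spawn(fut),
        }
    }

hyper's version also clones conn_drop_ref into the pipe before spawning it, so the connection task stays alive until the body has been fully sent.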

View File

@@ -1,5 +1,4 @@
use bytes::Buf;
//use futures::{Async, Future, Poll};
use h2::{SendStream};
use http::header::{
HeaderName, CONNECTION, PROXY_AUTHENTICATE, PROXY_AUTHORIZATION, TE, TRAILER,
@@ -8,11 +7,12 @@ use http::header::{
use http::HeaderMap;
use crate::body::Payload;
use crate::common::{Future, Pin, Poll, task};
mod client;
pub(crate) mod client;
pub(crate) mod server;
pub(crate) use self::client::Client;
pub(crate) use self::client::ClientTask;
pub(crate) use self::server::Server;
fn strip_connection_headers(headers: &mut HeaderMap, is_request: bool) {
@@ -106,17 +106,13 @@ where
}
}
/*
impl<S> Future for PipeToSendStream<S>
where
S: Payload,
S: Payload + Unpin,
{
type Item = ();
type Error = crate::Error;
type Output = crate::Result<()>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
unimplemented!("impl Future for PipeToSendStream");
/*
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
loop {
if !self.data_done {
// we don't have the next chunk of data yet, so just reserve 1 byte to make
@@ -126,23 +122,25 @@ where
if self.body_tx.capacity() == 0 {
loop {
match try_ready!(self.body_tx.poll_capacity().map_err(crate::Error::new_body_write)) {
Some(0) => {}
Some(_) => break,
None => return Err(crate::Error::new_canceled()),
match ready!(self.body_tx.poll_capacity(cx)) {
Some(Ok(0)) => {},
Some(Ok(_)) => break,
Some(Err(e)) => return Poll::Ready(Err(crate::Error::new_body_write(e))) ,
None => return Poll::Ready(Err(crate::Error::new_canceled())),
}
}
} else {
if let Async::Ready(reason) =
self.body_tx.poll_reset().map_err(crate::Error::new_body_write)?
if let Poll::Ready(reason) =
self.body_tx.poll_reset(cx).map_err(crate::Error::new_body_write)?
{
debug!("stream received RST_STREAM: {:?}", reason);
return Err(crate::Error::new_body_write(::h2::Error::from(reason)));
return Poll::Ready(Err(crate::Error::new_body_write(::h2::Error::from(reason))));
}
}
match try_ready!(self.stream.poll_data().map_err(|e| self.on_user_err(e))) {
Some(chunk) => {
match ready!(Pin::new(&mut self.stream).poll_data(cx)) {
Some(Ok(chunk)) => {
let is_eos = self.stream.is_end_stream();
trace!(
"send body chunk: {} bytes, eos={}",
@@ -156,14 +154,15 @@ where
.map_err(crate::Error::new_body_write)?;
if is_eos {
return Ok(Async::Ready(()));
return Poll::Ready(Ok(()));
}
}
Some(Err(e)) => return Poll::Ready(Err(self.on_user_err(e))),
None => {
self.body_tx.reserve_capacity(0);
let is_eos = self.stream.is_end_stream();
if is_eos {
return self.send_eos_frame().map(Async::Ready);
return Poll::Ready(self.send_eos_frame());
} else {
self.data_done = true;
// loop again to poll_trailers
@@ -171,31 +170,30 @@ where
}
}
} else {
if let Async::Ready(reason) =
self.body_tx.poll_reset().map_err(|e| crate::Error::new_body_write(e))?
if let Poll::Ready(reason) =
self.body_tx.poll_reset(cx).map_err(|e| crate::Error::new_body_write(e))?
{
debug!("stream received RST_STREAM: {:?}", reason);
return Err(crate::Error::new_body_write(::h2::Error::from(reason)));
return Poll::Ready(Err(crate::Error::new_body_write(::h2::Error::from(reason))));
}
match try_ready!(self.stream.poll_trailers().map_err(|e| self.on_user_err(e))) {
Some(trailers) => {
match ready!(Pin::new(&mut self.stream).poll_trailers(cx)) {
Some(Ok(trailers)) => {
self.body_tx
.send_trailers(trailers)
.map_err(crate::Error::new_body_write)?;
return Ok(Async::Ready(()));
return Poll::Ready(Ok(()));
}
Some(Err(e)) => return Poll::Ready(Err(self.on_user_err(e))),
None => {
// There were no trailers, so send an empty DATA frame...
return self.send_eos_frame().map(Async::Ready);
return Poll::Ready(self.send_eos_frame());
}
}
}
}
*/
}
}
*/
struct SendBuf<B>(Option<B>);
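
The poll_capacity / poll_reset dance inside PipeToSendStream is easier to follow in isolation. Below is a reduced sketch of sending a single chunk through h2's SendStream while respecting HTTP/2 flow control; send_chunk is a made-up helper and the error handling is collapsed to plain h2::Error rather than hyper's error types.

    use bytes::Bytes;
    use futures_util::future::poll_fn;
    use h2::SendStream;

    /// Send one chunk on an h2 stream, waiting for flow-control capacity first.
    async fn send_chunk(
        tx: &mut SendStream<Bytes>,
        chunk: Bytes,
        end_of_stream: bool,
    ) -> Result<(), h2::Error> {
        // Ask the connection for enough window to send the whole chunk.
        tx.reserve_capacity(chunk.len());

        // Wait until some capacity has actually been assigned to this stream;
        // a yielded 0 just means "not yet".
        while !chunk.is_empty() && tx.capacity() == 0 {
            match poll_fn(|cx| tx.poll_capacity(cx)).await {
                Some(Ok(0)) => continue,
                Some(Ok(_)) => break,
                Some(Err(e)) => return Err(e),
                // The stream closed before any capacity arrived.
                None => return Err(h2::Error::from(h2::Reason::CANCEL)),
            }
        }

        // Capacity is available: hand the data (and possibly END_STREAM) to h2.
        tx.send_data(chunk, end_of_stream)
    }

PipeToSendStream does the same thing in poll form, and while waiting it also checks poll_reset so a RST_STREAM from the peer aborts the transfer instead of leaving the body future parked.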

View File

@@ -1,5 +1,7 @@
use std::error::Error as StdError;
use std::marker::Unpin;
use futures_core::Stream;
use h2::Reason;
use h2::server::{Builder, Connection, Handshake, SendResponse};
use tokio_io::{AsyncRead, AsyncWrite};
@@ -49,27 +51,23 @@ where
impl<T, S, B, E> Server<T, S, B, E>
where
T: AsyncRead + AsyncWrite,
T: AsyncRead + AsyncWrite + Unpin,
S: Service<ReqBody=Body, ResBody=B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
B: Payload,
B::Data: Unpin,
E: H2Exec<S::Future, B>,
{
pub(crate) fn new(io: T, service: S, builder: &Builder, exec: E) -> Server<T, S, B, E> {
unimplemented!("proto::h2::Server::new")
/*
let handshake = builder.handshake(io);
Server {
exec,
state: State::Handshaking(handshake),
service,
}
*/
}
pub fn graceful_shutdown(&mut self) {
unimplemented!("proto::h2::Server::graceful_shutdown")
/*
trace!("graceful_shutdown");
match self.state {
State::Handshaking(..) => {
@@ -86,54 +84,53 @@ where
}
}
self.state = State::Closed;
*/
}
}
impl<T, S, B, E> Future for Server<T, S, B, E>
where
T: AsyncRead + AsyncWrite,
T: AsyncRead + AsyncWrite + Unpin,
S: Service<ReqBody=Body, ResBody=B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
B: Payload,
B::Data: Unpin,
E: H2Exec<S::Future, B>,
{
type Output = crate::Result<Dispatched>;
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
unimplemented!("h2 server future")
/*
let me = &mut *self;
loop {
let next = match self.state {
let next = match me.state {
State::Handshaking(ref mut h) => {
let conn = try_ready!(h.poll().map_err(crate::Error::new_h2));
let conn = ready!(Pin::new(h).poll(cx).map_err(crate::Error::new_h2))?;
State::Serving(Serving {
conn,
closing: None,
})
},
State::Serving(ref mut srv) => {
try_ready!(srv.poll_server(&mut self.service, &self.exec));
return Ok(Async::Ready(Dispatched::Shutdown));
ready!(srv.poll_server(cx, &mut me.service, &mut me.exec))?;
return Poll::Ready(Ok(Dispatched::Shutdown));
}
State::Closed => {
// graceful_shutdown was called before handshaking finished,
// nothing to do here...
return Ok(Async::Ready(Dispatched::Shutdown));
return Poll::Ready(Ok(Dispatched::Shutdown));
}
};
self.state = next;
me.state = next;
}
*/
}
}
impl<T, B> Serving<T, B>
where
T: AsyncRead + AsyncWrite,
T: AsyncRead + AsyncWrite + Unpin,
B: Payload,
B::Data: Unpin,
{
fn poll_server<S, E>(&mut self, service: &mut S, exec: &E) -> Poll<crate::Result<()>>
fn poll_server<S, E>(&mut self, cx: &mut task::Context<'_>, service: &mut S, exec: &mut E) -> Poll<crate::Result<()>>
where
S: Service<
ReqBody=Body,
@@ -142,19 +139,18 @@ where
S::Error: Into<Box<dyn StdError + Send + Sync>>,
E: H2Exec<S::Future, B>,
{
/*
if self.closing.is_none() {
loop {
// At first, polls the readiness of supplied service.
match service.poll_ready() {
Ok(Async::Ready(())) => (),
Ok(Async::NotReady) => {
match service.poll_ready(cx) {
Poll::Ready(Ok(())) => (),
Poll::Pending => {
// use `poll_close` instead of `poll`, in order to avoid accepting a request.
try_ready!(self.conn.poll_close().map_err(crate::Error::new_h2));
ready!(self.conn.poll_close(cx).map_err(crate::Error::new_h2))?;
trace!("incoming connection complete");
return Ok(Async::Ready(()));
return Poll::Ready(Ok(()));
}
Err(err) => {
Poll::Ready(Err(err)) => {
let err = crate::Error::new_user_service(err);
debug!("service closed: {}", err);
@@ -173,29 +169,33 @@ where
}
// When the service is ready, accepts an incoming request.
if let Some((req, respond)) = try_ready!(self.conn.poll().map_err(crate::Error::new_h2)) {
trace!("incoming request");
let content_length = content_length_parse_all(req.headers());
let req = req.map(|stream| {
crate::Body::h2(stream, content_length)
});
let fut = H2Stream::new(service.call(req), respond);
exec.execute_h2stream(fut)?;
} else {
// no more incoming streams...
trace!("incoming connection complete");
return Ok(Async::Ready(()))
match ready!(Pin::new(&mut self.conn).poll_next(cx)) {
Some(Ok((req, respond))) => {
trace!("incoming request");
let content_length = content_length_parse_all(req.headers());
let req = req.map(|stream| {
crate::Body::h2(stream, content_length)
});
let fut = H2Stream::new(service.call(req), respond);
exec.execute_h2stream(fut)?;
},
Some(Err(e)) => {
return Poll::Ready(Err(crate::Error::new_h2(e)));
},
None => {
// no more incoming streams...
trace!("incoming connection complete");
return Poll::Ready(Ok(()));
},
}
}
}
debug_assert!(self.closing.is_some(), "poll_server broke loop without closing");
try_ready!(self.conn.poll_close().map_err(crate::Error::new_h2));
ready!(self.conn.poll_close(cx).map_err(crate::Error::new_h2))?;
Err(self.closing.take().expect("polled after error"))
*/
unimplemented!("h2 server poll_server")
Poll::Ready(Err(self.closing.take().expect("polled after error")))
}
}
@@ -230,38 +230,37 @@ where
}
}
impl<F, B> Future for H2Stream<F, B>
impl<F, B, E> H2Stream<F, B>
where
//F: Future<Item=Response<B>>,
//F::Error: Into<Box<dyn StdError + Send + Sync>>,
B: Payload,
F: Future<Output = Result<Response<B>, E>>,
B: Payload + Unpin,
B::Data: Unpin,
E: Into<Box<dyn StdError + Send + Sync>>,
{
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
unimplemented!("impl Future for H2Stream");
/*
fn poll2(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
// Safety: State::{Service, Body} futures are never moved
let me = unsafe { self.get_unchecked_mut() };
loop {
let next = match self.state {
let next = match me.state {
H2StreamState::Service(ref mut h) => {
let res = match h.poll() {
Ok(Async::Ready(r)) => r,
Ok(Async::NotReady) => {
// Body is not yet ready, so we want to check if the client has sent a
let res = match unsafe { Pin::new_unchecked(h) }.poll(cx) {
Poll::Ready(Ok(r)) => r,
Poll::Pending => {
// Response is not yet ready, so we want to check if the client has sent a
// RST_STREAM frame which would cancel the current request.
if let Async::Ready(reason) =
self.reply.poll_reset().map_err(|e| crate::Error::new_h2(e))?
if let Poll::Ready(reason) =
me.reply.poll_reset(cx).map_err(|e| crate::Error::new_h2(e))?
{
debug!("stream received RST_STREAM: {:?}", reason);
return Err(crate::Error::new_h2(reason.into()));
return Poll::Ready(Err(crate::Error::new_h2(reason.into())));
}
return Ok(Async::NotReady);
return Poll::Pending;
}
Err(e) => {
Poll::Ready(Err(e)) => {
let err = crate::Error::new_user_service(e);
warn!("http2 service errored: {}", err);
self.reply.send_reset(err.h2_reason());
return Err(err);
me.reply.send_reset(err.h2_reason());
return Poll::Ready(Err(err));
},
};
@@ -278,12 +277,12 @@ where
macro_rules! reply {
($eos:expr) => ({
match self.reply.send_response(res, $eos) {
match me.reply.send_response(res, $eos) {
Ok(tx) => tx,
Err(e) => {
debug!("send response error: {}", e);
self.reply.send_reset(Reason::INTERNAL_ERROR);
return Err(crate::Error::new_h2(e));
me.reply.send_reset(Reason::INTERNAL_ERROR);
return Poll::Ready(Err(crate::Error::new_h2(e)));
}
}
})
@@ -300,7 +299,7 @@ where
body_tx
.send_data(buf, true)
.map_err(crate::Error::new_body_write)?;
return Ok(Async::Ready(()));
return Poll::Ready(Ok(()));
}
if !body.is_end_stream() {
@@ -308,32 +307,32 @@ where
H2StreamState::Body(PipeToSendStream::new(body, body_tx))
} else {
reply!(true);
return Ok(Async::Ready(()));
return Poll::Ready(Ok(()));
}
},
H2StreamState::Body(ref mut pipe) => {
return pipe.poll();
return Pin::new(pipe).poll(cx);
}
};
self.state = next;
me.state = next;
}
*/
}
}
/*
impl<F, B> Future for H2Stream<F, B>
impl<F, B, E> Future for H2Stream<F, B>
where
F: Future<Item=Response<B>>,
F::Error: Into<Box<dyn StdError + Send + Sync>>,
B: Payload,
F: Future<Output = Result<Response<B>, E>>,
B: Payload + Unpin,
B::Data: Unpin,
E: Into<Box<dyn StdError + Send + Sync>>,
{
type Item = ();
type Error = ();
type Output = ();
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
self.poll2()
.map_err(|e| debug!("stream error: {}", e))
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
self.poll2(cx).map(|res| {
if let Err(e) = res {
debug!("stream error: {}", e);
}
})
}
}
*/
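
On the server side, the key move in H2Stream::poll2 is in the Service state: while the user's response future is still pending, the SendResponse handle is polled for a reset so a client that sends RST_STREAM cancels the work immediately instead of waiting for the response to complete. A trimmed-down, free-standing version of that check is sketched below; poll_response is a hypothetical helper, and hyper's real code additionally handles service errors before streaming the response body.

    use bytes::Bytes;
    use h2::server::SendResponse;
    use std::future::Future;
    use std::pin::Pin;
    use std::task::{Context, Poll};

    /// Poll a response future, but also watch for the client resetting the
    /// stream so the work can be abandoned early.
    fn poll_response<F>(
        fut: Pin<&mut F>,
        reply: &mut SendResponse<Bytes>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<http::Response<()>, h2::Error>>
    where
        F: Future<Output = http::Response<()>>,
    {
        match fut.poll(cx) {
            Poll::Ready(res) => Poll::Ready(Ok(res)),
            Poll::Pending => {
                // The response is not ready yet; has the client already sent
                // RST_STREAM for this request?
                if let Poll::Ready(reset) = reply.poll_reset(cx) {
                    return match reset {
                        // Client cancelled: stop driving the service future.
                        Ok(reason) => Poll::Ready(Err(h2::Error::from(reason))),
                        // The stream or connection itself errored.
                        Err(e) => Poll::Ready(Err(e)),
                    };
                }
                Poll::Pending
            }
        }
    }

Once the response head is ready, the code above calls send_response and either finishes immediately for end-of-stream responses or switches to the Body state and pipes the body with PipeToSendStream.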