feat(lib): update to std::future::Future

BREAKING CHANGE: All usage of async traits (`Future`, `Stream`,
`AsyncRead`, `AsyncWrite`, etc.) is updated to newer versions.
Sean McArthur
2019-07-09 15:37:43 -07:00
parent da9b0319ef
commit 8f4b05ae78
37 changed files with 1526 additions and 1548 deletions
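The same mechanical pattern repeats in every hunk below: poll methods gain a `&mut task::Context<'_>` parameter, the futures 0.1 `Poll<T, E>` with `Async::Ready`/`Async::NotReady` becomes `std::task::Poll<Result<T, E>>` with `Poll::Ready`/`Poll::Pending`, and `try_ready!(expr)` becomes `ready!(expr)?`. A minimal before/after sketch of that shape, using hypothetical `Frame`/`FrameSource` types that are not part of this commit (the diff uses a `ready!` macro; the sketch uses `std::task::ready!`, which behaves the same way):

    use std::io;
    use std::task::{ready, Context, Poll};

    struct Frame;       // hypothetical placeholder, not a hyper type
    struct FrameSource; // hypothetical placeholder, not a hyper type

    impl FrameSource {
        // futures 0.1 shape (before):
        //   fn poll_frame(&mut self) -> Poll<Option<Frame>, io::Error> {
        //       let frame = try_ready!(self.inner_poll()); // Ok(Async::NotReady) bubbles up
        //       Ok(Async::Ready(Some(frame)))
        //   }
        //
        // std::future shape (after): takes a task Context, returns a single-type
        // Poll<Option<Result<..>>>, and uses ready!(..)? instead of try_ready!(..).
        fn poll_frame(&mut self, cx: &mut Context<'_>) -> Poll<Option<io::Result<Frame>>> {
            let frame = ready!(self.inner_poll(cx))?;
            Poll::Ready(Some(Ok(frame)))
        }

        fn inner_poll(&mut self, _cx: &mut Context<'_>) -> Poll<io::Result<Frame>> {
            Poll::Ready(Ok(Frame))
        }
    }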

View File

@@ -3,12 +3,12 @@ use std::io::{self};
use std::marker::PhantomData;
use bytes::{Buf, Bytes};
use futures::{Async, Poll};
use http::{HeaderMap, Method, Version};
use http::header::{HeaderValue, CONNECTION};
use tokio_io::{AsyncRead, AsyncWrite};
use crate::Chunk;
use crate::common::{Pin, Poll, Unpin, task};
use crate::proto::{BodyLength, DecodedLength, MessageHead};
use crate::headers::connection_keep_alive;
use super::io::{Buffered};
@@ -26,11 +26,11 @@ const H2_PREFACE: &'static [u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
pub(crate) struct Conn<I, B, T> {
io: Buffered<I, EncodedBuf<B>>,
state: State,
_marker: PhantomData<T>
_marker: PhantomData<fn(T)>
}
impl<I, B, T> Conn<I, B, T>
where I: AsyncRead + AsyncWrite,
where I: AsyncRead + AsyncWrite + Unpin,
B: Buf,
T: Http1Transaction,
{
@@ -129,16 +129,15 @@ where I: AsyncRead + AsyncWrite,
read_buf.len() >= 24 && read_buf[..24] == *H2_PREFACE
}
pub fn read_head(&mut self) -> Poll<Option<(MessageHead<T::Incoming>, DecodedLength, bool)>, crate::Error> {
pub fn poll_read_head(&mut self, cx: &mut task::Context<'_>) -> Poll<Option<crate::Result<(MessageHead<T::Incoming>, DecodedLength, bool)>>> {
debug_assert!(self.can_read_head());
trace!("Conn::read_head");
let msg = match self.io.parse::<T>(ParseContext {
let msg = match ready!(self.io.parse::<T>(cx, ParseContext {
cached_headers: &mut self.state.cached_headers,
req_method: &mut self.state.method,
}) {
Ok(Async::Ready(msg)) => msg,
Ok(Async::NotReady) => return Ok(Async::NotReady),
})) {
Ok(msg) => msg,
Err(e) => return self.on_read_head_error(e),
};
@@ -155,7 +154,7 @@ where I: AsyncRead + AsyncWrite,
debug_assert!(!msg.expect_continue, "expect-continue needs a body");
self.state.reading = Reading::KeepAlive;
if !T::should_read_first() {
self.try_keep_alive();
self.try_keep_alive(cx);
}
} else {
if msg.expect_continue {
@@ -165,10 +164,10 @@ where I: AsyncRead + AsyncWrite,
self.state.reading = Reading::Body(Decoder::new(msg.decode));
};
Ok(Async::Ready(Some((msg.head, msg.decode, msg.wants_upgrade))))
Poll::Ready(Some(Ok((msg.head, msg.decode, msg.wants_upgrade))))
}
fn on_read_head_error<Z>(&mut self, e: crate::Error) -> Poll<Option<Z>, crate::Error> {
fn on_read_head_error<Z>(&mut self, e: crate::Error) -> Poll<Option<crate::Result<Z>>> {
// If we are currently waiting on a message, then an empty
// message should be reported as an error. If not, it is just
// the connection closing gracefully.
@@ -179,25 +178,28 @@ where I: AsyncRead + AsyncWrite,
if was_mid_parse || must_error {
// We check if the buf contains the h2 Preface
debug!("parse error ({}) with {} bytes", e, self.io.read_buf().len());
self.on_parse_error(e)
.map(|()| Async::NotReady)
match self.on_parse_error(e) {
Ok(()) => Poll::Pending, // XXX: wat?
Err(e) => Poll::Ready(Some(Err(e))),
}
} else {
debug!("read eof");
Ok(Async::Ready(None))
Poll::Ready(None)
}
}
pub fn read_body(&mut self) -> Poll<Option<Chunk>, io::Error> {
pub fn poll_read_body(&mut self, cx: &mut task::Context<'_>) -> Poll<Option<io::Result<Chunk>>> {
debug_assert!(self.can_read_body());
let (reading, ret) = match self.state.reading {
Reading::Body(ref mut decoder) => {
match decoder.decode(&mut self.io) {
Ok(Async::Ready(slice)) => {
match decoder.decode(cx, &mut self.io) {
Poll::Ready(Ok(slice)) => {
let (reading, chunk) = if decoder.is_eof() {
debug!("incoming body completed");
(Reading::KeepAlive, if !slice.is_empty() {
Some(Chunk::from(slice))
Some(Ok(Chunk::from(slice)))
} else {
None
})
@@ -208,14 +210,14 @@ where I: AsyncRead + AsyncWrite,
// an empty slice...
(Reading::Closed, None)
} else {
return Ok(Async::Ready(Some(Chunk::from(slice))));
return Poll::Ready(Some(Ok(Chunk::from(slice))));
};
(reading, Ok(Async::Ready(chunk)))
(reading, Poll::Ready(chunk))
},
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(e) => {
Poll::Pending => return Poll::Pending,
Poll::Ready(Err(e)) => {
debug!("decode stream error: {}", e);
(Reading::Closed, Err(e))
(Reading::Closed, Poll::Ready(Some(Err(e))))
},
}
},
@@ -223,7 +225,7 @@ where I: AsyncRead + AsyncWrite,
};
self.state.reading = reading;
self.try_keep_alive();
self.try_keep_alive(cx);
ret
}
@@ -233,13 +235,13 @@ where I: AsyncRead + AsyncWrite,
ret
}
pub fn read_keep_alive(&mut self) -> Poll<(), crate::Error> {
pub fn poll_read_keep_alive(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
debug_assert!(!self.can_read_head() && !self.can_read_body());
if self.is_mid_message() {
self.mid_message_detect_eof()
self.mid_message_detect_eof(cx)
} else {
self.require_empty_read()
self.require_empty_read(cx)
}
}
@@ -254,25 +256,25 @@ where I: AsyncRead + AsyncWrite,
//
// This should only be called for Clients wanting to enter the idle
// state.
fn require_empty_read(&mut self) -> Poll<(), crate::Error> {
fn require_empty_read(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
debug_assert!(!self.can_read_head() && !self.can_read_body());
debug_assert!(!self.is_mid_message());
debug_assert!(T::is_client());
if !self.io.read_buf().is_empty() {
debug!("received an unexpected {} bytes", self.io.read_buf().len());
return Err(crate::Error::new_unexpected_message());
return Poll::Ready(Err(crate::Error::new_unexpected_message()));
}
let num_read = try_ready!(self.force_io_read().map_err(crate::Error::new_io));
let num_read = ready!(self.force_io_read(cx)).map_err(crate::Error::new_io)?;
if num_read == 0 {
let ret = if self.should_error_on_eof() {
trace!("found unexpected EOF on busy connection: {:?}", self.state);
Err(crate::Error::new_incomplete())
Poll::Ready(Err(crate::Error::new_incomplete()))
} else {
trace!("found EOF on idle connection, closing");
Ok(Async::Ready(()))
Poll::Ready(Ok(()))
};
// order is important: should_error needs state BEFORE close_read
@@ -281,38 +283,39 @@ where I: AsyncRead + AsyncWrite,
}
debug!("received unexpected {} bytes on an idle connection", num_read);
Err(crate::Error::new_unexpected_message())
Poll::Ready(Err(crate::Error::new_unexpected_message()))
}
fn mid_message_detect_eof(&mut self) -> Poll<(), crate::Error> {
fn mid_message_detect_eof(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
debug_assert!(!self.can_read_head() && !self.can_read_body());
debug_assert!(self.is_mid_message());
if self.state.allow_half_close || !self.io.read_buf().is_empty() {
return Ok(Async::NotReady);
return Poll::Pending;
}
let num_read = try_ready!(self.force_io_read().map_err(crate::Error::new_io));
let num_read = ready!(self.force_io_read(cx)).map_err(crate::Error::new_io)?;
if num_read == 0 {
trace!("found unexpected EOF on busy connection: {:?}", self.state);
self.state.close_read();
Err(crate::Error::new_incomplete())
Poll::Ready(Err(crate::Error::new_incomplete()))
} else {
Ok(Async::Ready(()))
Poll::Ready(Ok(()))
}
}
fn force_io_read(&mut self) -> Poll<usize, io::Error> {
self.io.read_from_io().map_err(|e| {
fn force_io_read(&mut self, cx: &mut task::Context<'_>) -> Poll<io::Result<usize>> {
let result = ready!(self.io.poll_read_from_io(cx));
Poll::Ready(result.map_err(|e| {
trace!("force_io_read; io error = {:?}", e);
self.state.close();
e
})
}))
}
fn maybe_notify(&mut self) {
fn maybe_notify(&mut self, cx: &mut task::Context<'_>) {
// it's possible that we returned NotReady from poll() without having
// exhausted the underlying Io. We would have done this when we
// determined we couldn't keep reading until we knew how writing
@@ -336,13 +339,13 @@ where I: AsyncRead + AsyncWrite,
if !self.io.is_read_blocked() {
if self.io.read_buf().is_empty() {
match self.io.read_from_io() {
Ok(Async::Ready(_)) => (),
Ok(Async::NotReady) => {
match self.io.poll_read_from_io(cx) {
Poll::Ready(Ok(_)) => (),
Poll::Pending => {
trace!("maybe_notify; read_from_io blocked");
return
},
Err(e) => {
Poll::Ready(Err(e)) => {
trace!("maybe_notify; read_from_io error: {}", e);
self.state.close();
}
@@ -352,9 +355,9 @@ where I: AsyncRead + AsyncWrite,
}
}
fn try_keep_alive(&mut self) {
fn try_keep_alive(&mut self, cx: &mut task::Context<'_>) {
self.state.try_keep_alive::<T>();
self.maybe_notify();
self.maybe_notify(cx);
}
pub fn can_write_head(&self) -> bool {
@@ -586,23 +589,22 @@ where I: AsyncRead + AsyncWrite,
Err(err)
}
pub fn flush(&mut self) -> Poll<(), io::Error> {
try_ready!(self.io.flush());
self.try_keep_alive();
pub fn poll_flush(&mut self, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
ready!(Pin::new(&mut self.io).poll_flush(cx))?;
self.try_keep_alive(cx);
trace!("flushed({}): {:?}", T::LOG, self.state);
Ok(Async::Ready(()))
Poll::Ready(Ok(()))
}
pub fn shutdown(&mut self) -> Poll<(), io::Error> {
match self.io.io_mut().shutdown() {
Ok(Async::NotReady) => Ok(Async::NotReady),
Ok(Async::Ready(())) => {
pub fn poll_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
match ready!(Pin::new(self.io.io_mut()).poll_shutdown(cx)) {
Ok(()) => {
trace!("shut down IO complete");
Ok(Async::Ready(()))
}
Poll::Ready(Ok(()))
},
Err(e) => {
debug!("error shutting down IO: {}", e);
Err(e)
Poll::Ready(Err(e))
}
}
}
@@ -652,6 +654,9 @@ impl<I, B: Buf, T> fmt::Debug for Conn<I, B, T> {
}
}
// B and T are never pinned
impl<I: Unpin, B, T> Unpin for Conn<I, B, T> {}
struct State {
allow_half_close: bool,
/// Re-usable HeaderMap to reduce allocating new ones.

View File

@@ -3,9 +3,10 @@ use std::fmt;
use std::usize;
use std::io;
use futures::{Async, Poll};
use bytes::Bytes;
use crate::common::{Poll, task};
use super::io::MemRead;
use super::{DecodedLength};
@@ -93,50 +94,51 @@ impl Decoder {
}
}
pub fn decode<R: MemRead>(&mut self, body: &mut R) -> Poll<Bytes, io::Error> {
pub fn decode<R: MemRead>(&mut self, cx: &mut task::Context<'_>, body: &mut R) -> Poll<Result<Bytes, io::Error>> {
trace!("decode; state={:?}", self.kind);
match self.kind {
Length(ref mut remaining) => {
if *remaining == 0 {
Ok(Async::Ready(Bytes::new()))
Poll::Ready(Ok(Bytes::new()))
} else {
let to_read = *remaining as usize;
let buf = try_ready!(body.read_mem(to_read));
let buf = ready!(body.read_mem(cx, to_read))?;
let num = buf.as_ref().len() as u64;
if num > *remaining {
*remaining = 0;
} else if num == 0 {
return Err(io::Error::new(io::ErrorKind::UnexpectedEof, IncompleteBody));
return Poll::Ready(Err(io::Error::new(io::ErrorKind::UnexpectedEof, IncompleteBody)));
} else {
*remaining -= num;
}
Ok(Async::Ready(buf))
Poll::Ready(Ok(buf))
}
}
Chunked(ref mut state, ref mut size) => {
loop {
let mut buf = None;
// advances the chunked state
*state = try_ready!(state.step(body, size, &mut buf));
*state = ready!(state.step(cx, body, size, &mut buf))?;
if *state == ChunkedState::End {
trace!("end of chunked");
return Ok(Async::Ready(Bytes::new()));
return Poll::Ready(Ok(Bytes::new()));
}
if let Some(buf) = buf {
return Ok(Async::Ready(buf));
return Poll::Ready(Ok(buf));
}
}
}
Eof(ref mut is_eof) => {
if *is_eof {
Ok(Async::Ready(Bytes::new()))
Poll::Ready(Ok(Bytes::new()))
} else {
// 8192 chosen because it's about 2 packets, there probably
// won't be that much available, so don't have MemReaders
// allocate buffers too big
let slice = try_ready!(body.read_mem(8192));
*is_eof = slice.is_empty();
Ok(Async::Ready(slice))
body.read_mem(cx, 8192).map_ok(|slice| {
*is_eof = slice.is_empty();
slice
})
}
}
}
@@ -151,41 +153,42 @@ impl fmt::Debug for Decoder {
}
macro_rules! byte (
($rdr:ident) => ({
let buf = try_ready!($rdr.read_mem(1));
($rdr:ident, $cx:expr) => ({
let buf = ready!($rdr.read_mem($cx, 1))?;
if !buf.is_empty() {
buf[0]
} else {
return Err(io::Error::new(io::ErrorKind::UnexpectedEof,
"Unexpected eof during chunk size line"));
return Poll::Ready(Err(io::Error::new(io::ErrorKind::UnexpectedEof,
"unexpected EOF during chunk size line")));
}
})
);
impl ChunkedState {
fn step<R: MemRead>(&self,
cx: &mut task::Context<'_>,
body: &mut R,
size: &mut u64,
buf: &mut Option<Bytes>)
-> Poll<ChunkedState, io::Error> {
-> Poll<Result<ChunkedState, io::Error>> {
use self::ChunkedState::*;
match *self {
Size => ChunkedState::read_size(body, size),
SizeLws => ChunkedState::read_size_lws(body),
Extension => ChunkedState::read_extension(body),
SizeLf => ChunkedState::read_size_lf(body, *size),
Body => ChunkedState::read_body(body, size, buf),
BodyCr => ChunkedState::read_body_cr(body),
BodyLf => ChunkedState::read_body_lf(body),
EndCr => ChunkedState::read_end_cr(body),
EndLf => ChunkedState::read_end_lf(body),
End => Ok(Async::Ready(ChunkedState::End)),
Size => ChunkedState::read_size(cx, body, size),
SizeLws => ChunkedState::read_size_lws(cx, body),
Extension => ChunkedState::read_extension(cx, body),
SizeLf => ChunkedState::read_size_lf(cx, body, *size),
Body => ChunkedState::read_body(cx, body, size, buf),
BodyCr => ChunkedState::read_body_cr(cx, body),
BodyLf => ChunkedState::read_body_lf(cx, body),
EndCr => ChunkedState::read_end_cr(cx, body),
EndLf => ChunkedState::read_end_lf(cx, body),
End => Poll::Ready(Ok(ChunkedState::End)),
}
}
fn read_size<R: MemRead>(rdr: &mut R, size: &mut u64) -> Poll<ChunkedState, io::Error> {
fn read_size<R: MemRead>(cx: &mut task::Context<'_>, rdr: &mut R, size: &mut u64) -> Poll<Result<ChunkedState, io::Error>> {
trace!("Read chunk hex size");
let radix = 16;
match byte!(rdr) {
match byte!(rdr, cx) {
b @ b'0'..=b'9' => {
*size *= radix;
*size += (b - b'0') as u64;
@@ -198,55 +201,55 @@ impl ChunkedState {
*size *= radix;
*size += (b + 10 - b'A') as u64;
}
b'\t' | b' ' => return Ok(Async::Ready(ChunkedState::SizeLws)),
b';' => return Ok(Async::Ready(ChunkedState::Extension)),
b'\r' => return Ok(Async::Ready(ChunkedState::SizeLf)),
b'\t' | b' ' => return Poll::Ready(Ok(ChunkedState::SizeLws)),
b';' => return Poll::Ready(Ok(ChunkedState::Extension)),
b'\r' => return Poll::Ready(Ok(ChunkedState::SizeLf)),
_ => {
return Err(io::Error::new(io::ErrorKind::InvalidInput,
"Invalid chunk size line: Invalid Size"));
return Poll::Ready(Err(io::Error::new(io::ErrorKind::InvalidInput,
"Invalid chunk size line: Invalid Size")));
}
}
Ok(Async::Ready(ChunkedState::Size))
Poll::Ready(Ok(ChunkedState::Size))
}
fn read_size_lws<R: MemRead>(rdr: &mut R) -> Poll<ChunkedState, io::Error> {
fn read_size_lws<R: MemRead>(cx: &mut task::Context<'_>, rdr: &mut R) -> Poll<Result<ChunkedState, io::Error>> {
trace!("read_size_lws");
match byte!(rdr) {
match byte!(rdr, cx) {
// LWS can follow the chunk size, but no more digits can come
b'\t' | b' ' => Ok(Async::Ready(ChunkedState::SizeLws)),
b';' => Ok(Async::Ready(ChunkedState::Extension)),
b'\r' => Ok(Async::Ready(ChunkedState::SizeLf)),
b'\t' | b' ' => Poll::Ready(Ok(ChunkedState::SizeLws)),
b';' => Poll::Ready(Ok(ChunkedState::Extension)),
b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
_ => {
Err(io::Error::new(io::ErrorKind::InvalidInput,
"Invalid chunk size linear white space"))
Poll::Ready(Err(io::Error::new(io::ErrorKind::InvalidInput,
"Invalid chunk size linear white space")))
}
}
}
fn read_extension<R: MemRead>(rdr: &mut R) -> Poll<ChunkedState, io::Error> {
fn read_extension<R: MemRead>(cx: &mut task::Context<'_>, rdr: &mut R) -> Poll<Result<ChunkedState, io::Error>> {
trace!("read_extension");
match byte!(rdr) {
b'\r' => Ok(Async::Ready(ChunkedState::SizeLf)),
_ => Ok(Async::Ready(ChunkedState::Extension)), // no supported extensions
match byte!(rdr, cx) {
b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
_ => Poll::Ready(Ok(ChunkedState::Extension)), // no supported extensions
}
}
fn read_size_lf<R: MemRead>(rdr: &mut R, size: u64) -> Poll<ChunkedState, io::Error> {
fn read_size_lf<R: MemRead>(cx: &mut task::Context<'_>, rdr: &mut R, size: u64) -> Poll<Result<ChunkedState, io::Error>> {
trace!("Chunk size is {:?}", size);
match byte!(rdr) {
match byte!(rdr, cx) {
b'\n' => {
if size == 0 {
Ok(Async::Ready(ChunkedState::EndCr))
Poll::Ready(Ok(ChunkedState::EndCr))
} else {
debug!("incoming chunked header: {0:#X} ({0} bytes)", size);
Ok(Async::Ready(ChunkedState::Body))
Poll::Ready(Ok(ChunkedState::Body))
}
},
_ => Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid chunk size LF")),
_ => Poll::Ready(Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid chunk size LF"))),
}
}
fn read_body<R: MemRead>(rdr: &mut R,
fn read_body<R: MemRead>(cx: &mut task::Context<'_>, rdr: &mut R,
rem: &mut u64,
buf: &mut Option<Bytes>)
-> Poll<ChunkedState, io::Error> {
-> Poll<Result<ChunkedState, io::Error>> {
trace!("Chunked read, remaining={:?}", rem);
// cap remaining bytes at the max capacity of usize
@@ -256,45 +259,45 @@ impl ChunkedState {
};
let to_read = rem_cap;
let slice = try_ready!(rdr.read_mem(to_read));
let slice = ready!(rdr.read_mem(cx, to_read))?;
let count = slice.len();
if count == 0 {
*rem = 0;
return Err(io::Error::new(io::ErrorKind::UnexpectedEof, IncompleteBody));
return Poll::Ready(Err(io::Error::new(io::ErrorKind::UnexpectedEof, IncompleteBody)));
}
*buf = Some(slice);
*rem -= count as u64;
if *rem > 0 {
Ok(Async::Ready(ChunkedState::Body))
Poll::Ready(Ok(ChunkedState::Body))
} else {
Ok(Async::Ready(ChunkedState::BodyCr))
Poll::Ready(Ok(ChunkedState::BodyCr))
}
}
fn read_body_cr<R: MemRead>(rdr: &mut R) -> Poll<ChunkedState, io::Error> {
match byte!(rdr) {
b'\r' => Ok(Async::Ready(ChunkedState::BodyLf)),
_ => Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid chunk body CR")),
fn read_body_cr<R: MemRead>(cx: &mut task::Context<'_>, rdr: &mut R) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr, cx) {
b'\r' => Poll::Ready(Ok(ChunkedState::BodyLf)),
_ => Poll::Ready(Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid chunk body CR"))),
}
}
fn read_body_lf<R: MemRead>(rdr: &mut R) -> Poll<ChunkedState, io::Error> {
match byte!(rdr) {
b'\n' => Ok(Async::Ready(ChunkedState::Size)),
_ => Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid chunk body LF")),
fn read_body_lf<R: MemRead>(cx: &mut task::Context<'_>, rdr: &mut R) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr, cx) {
b'\n' => Poll::Ready(Ok(ChunkedState::Size)),
_ => Poll::Ready(Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid chunk body LF"))),
}
}
fn read_end_cr<R: MemRead>(rdr: &mut R) -> Poll<ChunkedState, io::Error> {
match byte!(rdr) {
b'\r' => Ok(Async::Ready(ChunkedState::EndLf)),
_ => Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid chunk end CR")),
fn read_end_cr<R: MemRead>(cx: &mut task::Context<'_>, rdr: &mut R) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr, cx) {
b'\r' => Poll::Ready(Ok(ChunkedState::EndLf)),
_ => Poll::Ready(Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid chunk end CR"))),
}
}
fn read_end_lf<R: MemRead>(rdr: &mut R) -> Poll<ChunkedState, io::Error> {
match byte!(rdr) {
b'\n' => Ok(Async::Ready(ChunkedState::End)),
_ => Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid chunk end LF")),
fn read_end_lf<R: MemRead>(cx: &mut task::Context<'_>, rdr: &mut R) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr, cx) {
b'\n' => Poll::Ready(Ok(ChunkedState::End)),
_ => Poll::Ready(Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid chunk end LF"))),
}
}
}
@@ -326,15 +329,15 @@ mod tests {
use crate::mock::AsyncIo;
impl<'a> MemRead for &'a [u8] {
fn read_mem(&mut self, len: usize) -> Poll<Bytes, io::Error> {
fn read_mem(&mut self, len: usize) -> Poll<Result<Bytes, io::Error>> {
let n = ::std::cmp::min(len, self.len());
if n > 0 {
let (a, b) = self.split_at(n);
let mut buf = BytesMut::from(a);
*self = b;
Ok(Async::Ready(buf.split_to(n).freeze()))
Poll::Ready(Ok(buf.split_to(n).freeze()))
} else {
Ok(Async::Ready(Bytes::new()))
Poll::Ready(Ok(Bytes::new()))
}
}
}

View File

@@ -1,13 +1,12 @@
use std::error::Error as StdError;
use bytes::{Buf, Bytes};
use futures::{Async, Future, Poll, Stream};
use http::{Request, Response, StatusCode};
use tokio_io::{AsyncRead, AsyncWrite};
use crate::body::{Body, Payload};
use crate::body::internal::FullDataArg;
use crate::common::{Never, YieldNow};
use crate::common::{Future, Never, Poll, Pin, Unpin, task};
use crate::proto::{BodyLength, DecodedLength, Conn, Dispatched, MessageHead, RequestHead, RequestLine, ResponseHead};
use super::Http1Transaction;
use crate::service::Service;
@@ -16,12 +15,8 @@ pub(crate) struct Dispatcher<D, Bs: Payload, I, T> {
conn: Conn<I, Bs::Data, T>,
dispatch: D,
body_tx: Option<crate::body::Sender>,
body_rx: Option<Bs>,
body_rx: Pin<Box<Option<Bs>>>,
is_closing: bool,
/// If the poll loop reaches its max spin count, it will yield by notifying
/// the task immediately. This will cache that `Task`, since it usually is
/// the same one.
yield_now: YieldNow,
}
pub(crate) trait Dispatch {
@@ -29,14 +24,14 @@ pub(crate) trait Dispatch {
type PollBody;
type PollError;
type RecvItem;
fn poll_msg(&mut self) -> Poll<Option<(Self::PollItem, Self::PollBody)>, Self::PollError>;
fn poll_msg(&mut self, cx: &mut task::Context<'_>) -> Poll<Option<Result<(Self::PollItem, Self::PollBody), Self::PollError>>>;
fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Body)>) -> crate::Result<()>;
fn poll_ready(&mut self) -> Poll<(), ()>;
fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), ()>>;
fn should_poll(&self) -> bool;
}
pub struct Server<S: Service> {
in_flight: Option<S::Future>,
in_flight: Pin<Box<Option<S::Future>>>,
pub(crate) service: S,
}
@@ -49,10 +44,10 @@ type ClientRx<B> = crate::client::dispatch::Receiver<Request<B>, Response<Body>>
impl<D, Bs, I, T> Dispatcher<D, Bs, I, T>
where
D: Dispatch<PollItem=MessageHead<T::Outgoing>, PollBody=Bs, RecvItem=MessageHead<T::Incoming>>,
D: Dispatch<PollItem=MessageHead<T::Outgoing>, PollBody=Bs, RecvItem=MessageHead<T::Incoming>> + Unpin,
D::PollError: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite,
T: Http1Transaction,
I: AsyncRead + AsyncWrite + Unpin,
T: Http1Transaction + Unpin,
Bs: Payload,
{
pub fn new(dispatch: D, conn: Conn<I, Bs::Data, T>) -> Self {
@@ -60,9 +55,8 @@ where
conn: conn,
dispatch: dispatch,
body_tx: None,
body_rx: None,
body_rx: Box::pin(None),
is_closing: false,
yield_now: YieldNow::new(),
}
}
@@ -80,55 +74,74 @@ where
///
/// This is useful for old-style HTTP upgrades, but ignores
/// newer-style upgrade API.
pub fn poll_without_shutdown(&mut self) -> Poll<(), crate::Error> {
self.poll_catch(false)
.map(|x| {
x.map(|ds| if let Dispatched::Upgrade(pending) = ds {
pending.manual();
})
})
pub(crate) fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>>
where
Self: Unpin,
{
Pin::new(self).poll_catch(cx, false).map_ok(|ds| {
if let Dispatched::Upgrade(pending) = ds {
pending.manual();
}
})
}
fn poll_catch(&mut self, should_shutdown: bool) -> Poll<Dispatched, crate::Error> {
self.poll_inner(should_shutdown).or_else(|e| {
fn poll_catch(&mut self, cx: &mut task::Context<'_>, should_shutdown: bool) -> Poll<crate::Result<Dispatched>> {
Poll::Ready(ready!(self.poll_inner(cx, should_shutdown)).or_else(|e| {
// An error means we're shutting down either way.
// We just try to give the error to the user,
// and close the connection with an Ok. If we
// cannot give it to the user, then return the Err.
self.dispatch.recv_msg(Err(e))?;
Ok(Async::Ready(Dispatched::Shutdown))
})
Ok(Dispatched::Shutdown)
}))
}
fn poll_inner(&mut self, should_shutdown: bool) -> Poll<Dispatched, crate::Error> {
fn poll_inner(&mut self, cx: &mut task::Context<'_>, should_shutdown: bool) -> Poll<crate::Result<Dispatched>> {
T::update_date();
try_ready!(self.poll_loop());
ready!(self.poll_loop(cx))?;
loop {
self.poll_read(cx)?;
self.poll_write(cx)?;
self.poll_flush(cx)?;
// This could happen if reading paused before blocking on IO,
// such as getting to the end of a framed message, but then
// writing/flushing set the state back to Init. In that case,
// if the read buffer still had bytes, we'd want to try poll_read
// again, or else we wouldn't ever be woken up again.
//
// Using this instead of task::current() and notify() inside
// the Conn is noticeably faster in pipelined benchmarks.
if !self.conn.wants_read_again() {
break;
}
}
if self.is_done() {
if let Some(pending) = self.conn.pending_upgrade() {
self.conn.take_error()?;
return Ok(Async::Ready(Dispatched::Upgrade(pending)));
return Poll::Ready(Ok(Dispatched::Upgrade(pending)));
} else if should_shutdown {
try_ready!(self.conn.shutdown().map_err(crate::Error::new_shutdown));
ready!(self.conn.poll_shutdown(cx)).map_err(crate::Error::new_shutdown)?;
}
self.conn.take_error()?;
Ok(Async::Ready(Dispatched::Shutdown))
Poll::Ready(Ok(Dispatched::Shutdown))
} else {
Ok(Async::NotReady)
Poll::Pending
}
}
fn poll_loop(&mut self) -> Poll<(), crate::Error> {
fn poll_loop(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
// Limit the looping on this connection, in case it is ready far too
// often, so that other futures don't starve.
//
// 16 was chosen arbitrarily, as that is the number of pipelined requests
// benchmarks often use. Perhaps it should be a config option instead.
for _ in 0..16 {
self.poll_read()?;
self.poll_write()?;
self.poll_flush()?;
self.poll_read(cx)?;
self.poll_write(cx)?;
self.poll_flush(cx)?;
// This could happen if reading paused before blocking on IO,
// such as getting to the end of a framed message, but then
@@ -140,45 +153,39 @@ where
// the Conn is noticeably faster in pipelined benchmarks.
if !self.conn.wants_read_again() {
//break;
return Ok(Async::Ready(()));
return Poll::Ready(Ok(()));
}
}
trace!("poll_loop yielding (self = {:p})", self);
match self.yield_now.poll_yield() {
Ok(Async::NotReady) => Ok(Async::NotReady),
// maybe with `!` this can be cleaner...
// but for now, just doing this to eliminate branches
Ok(Async::Ready(never)) |
Err(never) => match never {}
}
task::yield_now(cx).map(|never| match never {})
}
fn poll_read(&mut self) -> Poll<(), crate::Error> {
fn poll_read(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
loop {
if self.is_closing {
return Ok(Async::Ready(()));
return Poll::Ready(Ok(()));
} else if self.conn.can_read_head() {
try_ready!(self.poll_read_head());
ready!(self.poll_read_head(cx))?;
} else if let Some(mut body) = self.body_tx.take() {
if self.conn.can_read_body() {
match body.poll_ready() {
Ok(Async::Ready(())) => (),
Ok(Async::NotReady) => {
match body.poll_ready(cx) {
Poll::Ready(Ok(())) => (),
Poll::Pending => {
self.body_tx = Some(body);
return Ok(Async::NotReady);
return Poll::Pending;
},
Err(_canceled) => {
Poll::Ready(Err(_canceled)) => {
// user doesn't care about the body
// so we should stop reading
trace!("body receiver dropped before eof, closing");
self.conn.close_read();
return Ok(Async::Ready(()));
return Poll::Ready(Ok(()));
}
}
match self.conn.read_body() {
Ok(Async::Ready(Some(chunk))) => {
match self.conn.poll_read_body(cx) {
Poll::Ready(Some(Ok(chunk))) => {
match body.send_data(chunk) {
Ok(()) => {
self.body_tx = Some(body);
@@ -191,14 +198,14 @@ where
}
}
},
Ok(Async::Ready(None)) => {
Poll::Ready(None) => {
// just drop, the body will close automatically
},
Ok(Async::NotReady) => {
Poll::Pending => {
self.body_tx = Some(body);
return Ok(Async::NotReady);
return Poll::Pending;
}
Err(e) => {
Poll::Ready(Some(Err(e))) => {
body.send_error(crate::Error::new_body(e));
}
}
@@ -206,25 +213,24 @@ where
// just drop, the body will close automatically
}
} else {
return self.conn.read_keep_alive();
return self.conn.poll_read_keep_alive(cx);
}
}
}
fn poll_read_head(&mut self) -> Poll<(), crate::Error> {
fn poll_read_head(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
// can dispatch receive, or does it still care about, an incoming message?
match self.dispatch.poll_ready() {
Ok(Async::Ready(())) => (),
Ok(Async::NotReady) => return Ok(Async::NotReady), // service might not be ready
match ready!(self.dispatch.poll_ready(cx)) {
Ok(()) => (),
Err(()) => {
trace!("dispatch no longer receiving messages");
self.close();
return Ok(Async::Ready(()));
return Poll::Ready(Ok(()));
}
}
// dispatch is ready for a message, try to read one
match self.conn.read_head() {
Ok(Async::Ready(Some((head, body_len, wants_upgrade)))) => {
match ready!(self.conn.poll_read_head(cx)) {
Some(Ok((head, body_len, wants_upgrade))) => {
let mut body = match body_len {
DecodedLength::ZERO => Body::empty(),
other => {
@@ -237,67 +243,78 @@ where
body.set_on_upgrade(self.conn.on_upgrade());
}
self.dispatch.recv_msg(Ok((head, body)))?;
Ok(Async::Ready(()))
}
Ok(Async::Ready(None)) => {
// read eof, conn will start to shutdown automatically
Ok(Async::Ready(()))
}
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(err) => {
Poll::Ready(Ok(()))
},
Some(Err(err)) => {
debug!("read_head error: {}", err);
self.dispatch.recv_msg(Err(err))?;
// if here, the dispatcher gave the user the error
// somewhere else. we still need to shutdown, but
// not as a second error.
Ok(Async::Ready(()))
Poll::Ready(Ok(()))
},
None => {
// read eof, conn will start to shutdown automatically
Poll::Ready(Ok(()))
}
}
}
fn poll_write(&mut self) -> Poll<(), crate::Error> {
fn poll_write(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
loop {
if self.is_closing {
return Ok(Async::Ready(()));
return Poll::Ready(Ok(()));
} else if self.body_rx.is_none() && self.conn.can_write_head() && self.dispatch.should_poll() {
if let Some((head, mut body)) = try_ready!(self.dispatch.poll_msg().map_err(crate::Error::new_user_service)) {
if let Some(msg) = ready!(self.dispatch.poll_msg(cx)) {
let (head, mut body) = msg.map_err(crate::Error::new_user_service)?;
// Check if the body knows its full data immediately.
//
// If so, we can skip a bit of bookkeeping that streaming
// bodies need to do.
if let Some(full) = body.__hyper_full_data(FullDataArg(())).0 {
self.conn.write_full_msg(head, full);
return Ok(Async::Ready(()));
return Poll::Ready(Ok(()));
}
let body_type = if body.is_end_stream() {
self.body_rx = None;
self.body_rx.set(None);
None
} else {
let btype = body.content_length()
.map(BodyLength::Known)
.or_else(|| Some(BodyLength::Unknown));
self.body_rx = Some(body);
self.body_rx.set(Some(body));
btype
};
self.conn.write_head(head, body_type);
} else {
self.close();
return Ok(Async::Ready(()));
return Poll::Ready(Ok(()));
}
} else if !self.conn.can_buffer_body() {
try_ready!(self.poll_flush());
} else if let Some(mut body) = self.body_rx.take() {
if !self.conn.can_write_body() {
trace!(
"no more write body allowed, user body is_end_stream = {}",
body.is_end_stream(),
);
continue;
}
match body.poll_data().map_err(crate::Error::new_user_body)? {
Async::Ready(Some(chunk)) => {
ready!(self.poll_flush(cx))?;
} else {
// A new scope is needed :(
if let (Some(mut body), clear_body) = OptGuard::new(self.body_rx.as_mut()).guard_mut() {
debug_assert!(!*clear_body, "opt guard defaults to keeping body");
if !self.conn.can_write_body() {
trace!(
"no more write body allowed, user body is_end_stream = {}",
body.is_end_stream(),
);
*clear_body = true;
continue;
}
let item = ready!(body.as_mut().poll_data(cx));
if let Some(item) = item {
let chunk = item.map_err(|e| {
*clear_body = true;
crate::Error::new_user_body(e)
})?;
let eos = body.is_end_stream();
if eos {
*clear_body = true;
if chunk.remaining() == 0 {
trace!("discarding empty chunk");
self.conn.end_body();
@@ -305,30 +322,25 @@ where
self.conn.write_body_and_end(chunk);
}
} else {
self.body_rx = Some(body);
if chunk.remaining() == 0 {
trace!("discarding empty chunk");
continue;
}
self.conn.write_body(chunk);
}
},
Async::Ready(None) => {
} else {
*clear_body = true;
self.conn.end_body();
},
Async::NotReady => {
self.body_rx = Some(body);
return Ok(Async::NotReady);
}
} else {
return Poll::Pending;
}
} else {
return Ok(Async::NotReady);
}
}
}
fn poll_flush(&mut self) -> Poll<(), crate::Error> {
self.conn.flush().map_err(|err| {
fn poll_flush(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
self.conn.poll_flush(cx).map_err(|err| {
debug!("error writing: {}", err);
crate::Error::new_body_write(err)
})
@@ -360,35 +372,65 @@ where
impl<D, Bs, I, T> Future for Dispatcher<D, Bs, I, T>
where
D: Dispatch<PollItem=MessageHead<T::Outgoing>, PollBody=Bs, RecvItem=MessageHead<T::Incoming>>,
D: Dispatch<PollItem=MessageHead<T::Outgoing>, PollBody=Bs, RecvItem=MessageHead<T::Incoming>> + Unpin,
D::PollError: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite,
T: Http1Transaction,
I: AsyncRead + AsyncWrite + Unpin,
T: Http1Transaction + Unpin,
Bs: Payload,
{
type Item = Dispatched;
type Error = crate::Error;
type Output = crate::Result<Dispatched>;
#[inline]
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
self.poll_catch(true)
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
self.poll_catch(cx, true)
}
}
// ===== impl OptGuard =====
/// A drop guard to allow a mutable borrow of an Option while being able to
/// set whether the `Option` should be cleared on drop.
struct OptGuard<'a, T>(Pin<&'a mut Option<T>>, bool);
impl<'a, T> OptGuard<'a, T> {
fn new(pin: Pin<&'a mut Option<T>>) -> Self {
OptGuard(pin, false)
}
fn guard_mut(&mut self) -> (Option<Pin<&mut T>>, &mut bool) {
(self.0.as_mut().as_pin_mut(), &mut self.1)
}
}
impl<'a, T> Drop for OptGuard<'a, T> {
fn drop(&mut self) {
if self.1 {
self.0.set(None);
}
}
}
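A minimal standalone sketch of how the guard is meant to be used (assuming the `OptGuard` above is in scope; `consume_if_done` and the `String` payload are illustrative, not from this file): take the pinned `Option` mutably, flip the flag while the borrow is live, and let `Drop` clear the slot.

    use std::pin::Pin;

    fn consume_if_done(slot: Pin<&mut Option<String>>) {
        let mut guard = OptGuard::new(slot);
        if let (Some(value), clear) = guard.guard_mut() {
            if value.is_empty() {
                // setting the flag means the Option is cleared when `guard` drops
                *clear = true;
            }
        }
        // `guard` drops here; `slot` is set to None only if the flag was set
    }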
// ===== impl Server =====
impl<S> Server<S> where S: Service {
impl<S> Server<S>
where
S: Service,
{
pub fn new(service: S) -> Server<S> {
Server {
in_flight: None,
in_flight: Box::pin(None),
service: service,
}
}
pub fn into_service(self) -> S {
self.service
}
}
// Service is never pinned
impl<S: Service> Unpin for Server<S> {}
impl<S, Bs> Dispatch for Server<S>
where
S: Service<ReqBody=Body, ResBody=Bs>,
@@ -400,25 +442,23 @@ where
type PollError = S::Error;
type RecvItem = RequestHead;
fn poll_msg(&mut self) -> Poll<Option<(Self::PollItem, Self::PollBody)>, Self::PollError> {
if let Some(mut fut) = self.in_flight.take() {
let resp = match fut.poll()? {
Async::Ready(res) => res,
Async::NotReady => {
self.in_flight = Some(fut);
return Ok(Async::NotReady);
}
};
fn poll_msg(&mut self, cx: &mut task::Context<'_>) -> Poll<Option<Result<(Self::PollItem, Self::PollBody), Self::PollError>>> {
let ret = if let Some(ref mut fut) = self.in_flight.as_mut().as_pin_mut() {
let resp = ready!(fut.as_mut().poll(cx)?);
let (parts, body) = resp.into_parts();
let head = MessageHead {
version: parts.version,
subject: parts.status,
headers: parts.headers,
};
Ok(Async::Ready(Some((head, body))))
Poll::Ready(Some(Ok((head, body))))
} else {
unreachable!("poll_msg shouldn't be called if no inflight");
}
};
// Since in_flight finished, remove it
self.in_flight.set(None);
ret
}
fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Body)>) -> crate::Result<()> {
@@ -428,15 +468,16 @@ where
*req.uri_mut() = msg.subject.1;
*req.headers_mut() = msg.headers;
*req.version_mut() = msg.version;
self.in_flight = Some(self.service.call(req));
let fut = self.service.call(req);
self.in_flight.set(Some(fut));
Ok(())
}
fn poll_ready(&mut self) -> Poll<(), ()> {
fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), ()>> {
if self.in_flight.is_some() {
Ok(Async::NotReady)
Poll::Pending
} else {
self.service.poll_ready()
self.service.poll_ready(cx)
.map_err(|_e| {
// FIXME: return error value.
trace!("service closed");
@@ -470,16 +511,16 @@ where
type PollError = Never;
type RecvItem = ResponseHead;
fn poll_msg(&mut self) -> Poll<Option<(Self::PollItem, Self::PollBody)>, Never> {
match self.rx.poll() {
Ok(Async::Ready(Some((req, mut cb)))) => {
fn poll_msg(&mut self, cx: &mut task::Context<'_>) -> Poll<Option<Result<(Self::PollItem, Self::PollBody), Never>>> {
match self.rx.poll_next(cx) {
Poll::Ready(Some((req, mut cb))) => {
// check that future hasn't been canceled already
match cb.poll_cancel().expect("poll_cancel cannot error") {
Async::Ready(()) => {
match cb.poll_cancel(cx) {
Poll::Ready(()) => {
trace!("request canceled");
Ok(Async::Ready(None))
Poll::Ready(None)
},
Async::NotReady => {
Poll::Pending => {
let (parts, body) = req.into_parts();
let head = RequestHead {
version: parts.version,
@@ -487,17 +528,16 @@ where
headers: parts.headers,
};
self.callback = Some(cb);
Ok(Async::Ready(Some((head, body))))
Poll::Ready(Some(Ok((head, body))))
}
}
},
Ok(Async::Ready(None)) => {
Poll::Ready(None) => {
trace!("client tx closed");
// user has dropped sender handle
Ok(Async::Ready(None))
Poll::Ready(None)
},
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(never) => match never {},
Poll::Pending => Poll::Pending,
}
}
@@ -522,30 +562,32 @@ where
if let Some(cb) = self.callback.take() {
let _ = cb.send(Err((err, None)));
Ok(())
} else if let Ok(Async::Ready(Some((req, cb)))) = self.rx.poll() {
trace!("canceling queued request with connection error: {}", err);
// in this case, the message was never even started, so it's safe to tell
// the user that the request was completely canceled
let _ = cb.send(Err((crate::Error::new_canceled().with(err), Some(req))));
Ok(())
} else {
Err(err)
self.rx.close();
if let Some((req, cb)) = self.rx.try_recv() {
trace!("canceling queued request with connection error: {}", err);
// in this case, the message was never even started, so it's safe to tell
// the user that the request was completely canceled
let _ = cb.send(Err((crate::Error::new_canceled().with(err), Some(req))));
Ok(())
} else {
Err(err)
}
}
}
}
}
fn poll_ready(&mut self) -> Poll<(), ()> {
fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), ()>> {
match self.callback {
Some(ref mut cb) => match cb.poll_cancel() {
Ok(Async::Ready(())) => {
Some(ref mut cb) => match cb.poll_cancel(cx) {
Poll::Ready(()) => {
trace!("callback receiver has dropped");
Err(())
Poll::Ready(Err(()))
},
Ok(Async::NotReady) => Ok(Async::Ready(())),
Err(_) => unreachable!("oneshot poll_cancel cannot error"),
Poll::Pending => Poll::Ready(Ok(())),
},
None => Err(()),
None => Poll::Ready(Err(())),
}
}

View File

@@ -5,10 +5,10 @@ use std::fmt;
use std::io;
use bytes::{Buf, BufMut, Bytes, BytesMut};
use futures::{Async, Poll};
use iovec::IoVec;
use tokio_io::{AsyncRead, AsyncWrite};
use crate::common::{Pin, Poll, Unpin, task};
use super::{Http1Transaction, ParseContext, ParsedMessage};
/// The initial buffer size allocated before trying to read from IO.
@@ -52,7 +52,7 @@ where
impl<T, B> Buffered<T, B>
where
T: AsyncRead + AsyncWrite,
T: AsyncRead + AsyncWrite + Unpin,
B: Buf,
{
pub fn new(io: T) -> Buffered<T, B> {
@@ -135,57 +135,56 @@ where
}
}
pub(super) fn parse<S>(&mut self, ctx: ParseContext)
-> Poll<ParsedMessage<S::Incoming>, crate::Error>
pub(super) fn parse<S>(&mut self, cx: &mut task::Context<'_>, parse_ctx: ParseContext)
-> Poll<crate::Result<ParsedMessage<S::Incoming>>>
where
S: Http1Transaction,
{
loop {
match S::parse(&mut self.read_buf, ParseContext {
cached_headers: ctx.cached_headers,
req_method: ctx.req_method,
cached_headers: parse_ctx.cached_headers,
req_method: parse_ctx.req_method,
})? {
Some(msg) => {
debug!("parsed {} headers", msg.head.headers.len());
return Ok(Async::Ready(msg))
return Poll::Ready(Ok(msg));
},
None => {
let max = self.read_buf_strategy.max();
if self.read_buf.len() >= max {
debug!("max_buf_size ({}) reached, closing", max);
return Err(crate::Error::new_too_large());
return Poll::Ready(Err(crate::Error::new_too_large()));
}
},
}
match try_ready!(self.read_from_io().map_err(crate::Error::new_io)) {
match ready!(self.poll_read_from_io(cx)).map_err(crate::Error::new_io)? {
0 => {
trace!("parse eof");
return Err(crate::Error::new_incomplete());
return Poll::Ready(Err(crate::Error::new_incomplete()));
}
_ => {},
}
}
}
pub fn read_from_io(&mut self) -> Poll<usize, io::Error> {
pub fn poll_read_from_io(&mut self, cx: &mut task::Context<'_>) -> Poll<io::Result<usize>> {
self.read_blocked = false;
let next = self.read_buf_strategy.next();
if self.read_buf.remaining_mut() < next {
self.read_buf.reserve(next);
}
self.io.read_buf(&mut self.read_buf).map(|ok| {
match ok {
Async::Ready(n) => {
match Pin::new(&mut self.io).poll_read_buf(cx, &mut self.read_buf) {
Poll::Ready(Ok(n)) => {
debug!("read {} bytes", n);
self.read_buf_strategy.record(n);
Async::Ready(n)
Poll::Ready(Ok(n))
},
Async::NotReady => {
self.read_blocked = true;
Async::NotReady
}
Poll::Pending => {
self.read_blocked = true;
Poll::Pending
}
})
Poll::Ready(Err(e)) => Poll::Ready(Err(e)),
}
}
pub fn into_inner(self) -> (T, Bytes) {
@@ -200,38 +199,37 @@ where
self.read_blocked
}
pub fn flush(&mut self) -> Poll<(), io::Error> {
pub fn poll_flush(&mut self, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
if self.flush_pipeline && !self.read_buf.is_empty() {
//Ok(())
Poll::Ready(Ok(()))
} else if self.write_buf.remaining() == 0 {
try_nb!(self.io.flush());
Pin::new(&mut self.io).poll_flush(cx)
} else {
match self.write_buf.strategy {
WriteStrategy::Flatten => return self.flush_flattened(),
WriteStrategy::Flatten => return self.poll_flush_flattened(cx),
_ => (),
}
loop {
let n = try_ready!(self.io.write_buf(&mut self.write_buf.auto()));
let n = ready!(Pin::new(&mut self.io).poll_write_buf(cx, &mut self.write_buf.auto()))?;
debug!("flushed {} bytes", n);
if self.write_buf.remaining() == 0 {
break;
} else if n == 0 {
trace!("write returned zero, but {} bytes remaining", self.write_buf.remaining());
return Err(io::ErrorKind::WriteZero.into())
return Poll::Ready(Err(io::ErrorKind::WriteZero.into()));
}
}
try_nb!(self.io.flush())
Pin::new(&mut self.io).poll_flush(cx)
}
Ok(Async::Ready(()))
}
/// Specialized version of `flush` when strategy is Flatten.
///
/// Since all buffered bytes are flattened into the single headers buffer,
/// that skips some bookkeeping around using multiple buffers.
fn flush_flattened(&mut self) -> Poll<(), io::Error> {
fn poll_flush_flattened(&mut self, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
loop {
let n = try_nb!(self.io.write(self.write_buf.headers.bytes()));
let n = ready!(Pin::new(&mut self.io).poll_write(cx, self.write_buf.headers.bytes()))?;
debug!("flushed {} bytes", n);
self.write_buf.headers.advance(n);
if self.write_buf.headers.remaining() == 0 {
@@ -239,30 +237,33 @@ where
break;
} else if n == 0 {
trace!("write returned zero, but {} bytes remaining", self.write_buf.remaining());
return Err(io::ErrorKind::WriteZero.into())
return Poll::Ready(Err(io::ErrorKind::WriteZero.into()));
}
}
try_nb!(self.io.flush());
Ok(Async::Ready(()))
Pin::new(&mut self.io).poll_flush(cx)
}
}
// The `B` is a `Buf`, we never project a pin to it
impl<T: Unpin, B> Unpin for Buffered<T, B> {}
// TODO: This trait is old... at least rename to PollBytes or something...
pub trait MemRead {
fn read_mem(&mut self, len: usize) -> Poll<Bytes, io::Error>;
fn read_mem(&mut self, cx: &mut task::Context<'_>, len: usize) -> Poll<io::Result<Bytes>>;
}
impl<T, B> MemRead for Buffered<T, B>
where
T: AsyncRead + AsyncWrite,
T: AsyncRead + AsyncWrite + Unpin,
B: Buf,
{
fn read_mem(&mut self, len: usize) -> Poll<Bytes, io::Error> {
fn read_mem(&mut self, cx: &mut task::Context<'_>, len: usize) -> Poll<io::Result<Bytes>> {
if !self.read_buf.is_empty() {
let n = ::std::cmp::min(len, self.read_buf.len());
Ok(Async::Ready(self.read_buf.split_to(n).freeze()))
Poll::Ready(Ok(self.read_buf.split_to(n).freeze()))
} else {
let n = try_ready!(self.read_from_io());
Ok(Async::Ready(self.read_buf.split_to(::std::cmp::min(len, n)).freeze()))
let n = ready!(self.poll_read_from_io(cx))?;
Poll::Ready(Ok(self.read_buf.split_to(::std::cmp::min(len, n)).freeze()))
}
}
}

View File

@@ -1,26 +1,26 @@
use bytes::IntoBuf;
use futures::{Async, Future, Poll, Stream};
use futures::future::{self, Either};
use futures::sync::{mpsc, oneshot};
//use futures::{Async, Future, Poll, Stream};
//use futures::future::{self, Either};
//use futures::sync::{mpsc, oneshot};
use h2::client::{Builder, Handshake, SendRequest};
use tokio_io::{AsyncRead, AsyncWrite};
use crate::headers::content_length_parse_all;
use crate::body::Payload;
use crate::common::{Exec, Never};
use crate::common::{Exec, Future, Never, Pin, Poll, task};
use crate::headers;
use crate::proto::Dispatched;
use super::{PipeToSendStream, SendBuf};
use crate::{Body, Request, Response};
type ClientRx<B> = crate::client::dispatch::Receiver<Request<B>, Response<Body>>;
/// An mpsc channel is used to help notify the `Connection` task when *all*
/// other handles to it have been dropped, so that it can shutdown.
type ConnDropRef = mpsc::Sender<Never>;
///// An mpsc channel is used to help notify the `Connection` task when *all*
///// other handles to it have been dropped, so that it can shutdown.
//type ConnDropRef = mpsc::Sender<Never>;
/// A oneshot channel watches the `Connection` task, and when it completes,
/// the "dispatch" task will be notified and can shutdown sooner.
type ConnEof = oneshot::Receiver<Never>;
///// A oneshot channel watches the `Connection` task, and when it completes,
///// the "dispatch" task will be notified and can shutdown sooner.
//type ConnEof = oneshot::Receiver<Never>;
pub(crate) struct Client<T, B>
where
@@ -33,7 +33,7 @@ where
enum State<T, B> where B: IntoBuf {
Handshaking(Handshake<T, B>),
Ready(SendRequest<B>, ConnDropRef, ConnEof),
//Ready(SendRequest<B>, ConnDropRef, ConnEof),
}
impl<T, B> Client<T, B>
@@ -42,6 +42,8 @@ where
B: Payload,
{
pub(crate) fn new(io: T, rx: ClientRx<B>, builder: &Builder, exec: Exec) -> Client<T, B> {
unimplemented!("proto::h2::Client::new");
/*
let handshake = builder.handshake(io);
Client {
@@ -49,6 +51,7 @@ where
rx: rx,
state: State::Handshaking(handshake),
}
*/
}
}
@@ -57,10 +60,11 @@ where
T: AsyncRead + AsyncWrite + Send + 'static,
B: Payload + 'static,
{
type Item = Dispatched;
type Error = crate::Error;
type Output = crate::Result<Dispatched>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
unimplemented!("impl Future for proto::h2::Client");
/*
loop {
let next = match self.state {
State::Handshaking(ref mut h) => {
@@ -196,5 +200,6 @@ where
};
self.state = next;
}
*/
}
}

View File

@@ -1,5 +1,5 @@
use bytes::Buf;
use futures::{Async, Future, Poll};
//use futures::{Async, Future, Poll};
use h2::{SendStream};
use http::header::{
HeaderName, CONNECTION, PROXY_AUTHENTICATE, PROXY_AUTHORIZATION, TE, TRAILER,
@@ -106,6 +106,7 @@ where
}
}
/*
impl<S> Future for PipeToSendStream<S>
where
S: Payload,
@@ -114,6 +115,8 @@ where
type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
unimplemented!("impl Future for PipeToSendStream");
/*
loop {
if !self.data_done {
// we don't have the next chunk of data yet, so just reserve 1 byte to make
@@ -189,8 +192,10 @@ where
}
}
}
*/
}
}
*/
struct SendBuf<B>(Option<B>);

View File

@@ -1,15 +1,15 @@
use std::error::Error as StdError;
use futures::{Async, Future, Poll, Stream};
use h2::Reason;
use h2::server::{Builder, Connection, Handshake, SendResponse};
use tokio_io::{AsyncRead, AsyncWrite};
use crate::headers::content_length_parse_all;
use crate::body::Payload;
use crate::body::internal::FullDataArg;
use crate::common::exec::H2Exec;
use crate::common::{Future, Pin, Poll, task};
use crate::headers;
use crate::headers::content_length_parse_all;
use crate::service::Service;
use crate::proto::Dispatched;
use super::{PipeToSendStream, SendBuf};
@@ -26,6 +26,9 @@ where
state: State<T, B>,
}
// TODO: fix me
impl<T, S: Service, B: Payload, E> Unpin for Server<T, S, B, E> {}
enum State<T, B>
where
B: Payload,
@@ -53,15 +56,20 @@ where
E: H2Exec<S::Future, B>,
{
pub(crate) fn new(io: T, service: S, builder: &Builder, exec: E) -> Server<T, S, B, E> {
unimplemented!("proto::h2::Server::new")
/*
let handshake = builder.handshake(io);
Server {
exec,
state: State::Handshaking(handshake),
service,
}
*/
}
pub fn graceful_shutdown(&mut self) {
unimplemented!("proto::h2::Server::graceful_shutdown")
/*
trace!("graceful_shutdown");
match self.state {
State::Handshaking(..) => {
@@ -78,6 +86,7 @@ where
}
}
self.state = State::Closed;
*/
}
}
@@ -89,10 +98,11 @@ where
B: Payload,
E: H2Exec<S::Future, B>,
{
type Item = Dispatched;
type Error = crate::Error;
type Output = crate::Result<Dispatched>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
unimplemented!("h2 server future")
/*
loop {
let next = match self.state {
State::Handshaking(ref mut h) => {
@@ -114,6 +124,7 @@ where
};
self.state = next;
}
*/
}
}
@@ -122,7 +133,7 @@ where
T: AsyncRead + AsyncWrite,
B: Payload,
{
fn poll_server<S, E>(&mut self, service: &mut S, exec: &E) -> Poll<(), crate::Error>
fn poll_server<S, E>(&mut self, service: &mut S, exec: &E) -> Poll<crate::Result<()>>
where
S: Service<
ReqBody=Body,
@@ -131,6 +142,7 @@ where
S::Error: Into<Box<dyn StdError + Send + Sync>>,
E: H2Exec<S::Future, B>,
{
/*
if self.closing.is_none() {
loop {
// At first, polls the readiness of supplied service.
@@ -182,6 +194,8 @@ where
try_ready!(self.conn.poll_close().map_err(crate::Error::new_h2));
Err(self.closing.take().expect("polled after error"))
*/
unimplemented!("h2 server poll_server")
}
}
@@ -204,8 +218,8 @@ where
impl<F, B> H2Stream<F, B>
where
F: Future<Item=Response<B>>,
F::Error: Into<Box<dyn StdError + Send + Sync>>,
//F: Future<Item=Response<B>>,
//F::Error: Into<Box<dyn StdError + Send + Sync>>,
B: Payload,
{
fn new(fut: F, respond: SendResponse<SendBuf<B::Data>>) -> H2Stream<F, B> {
@@ -214,8 +228,19 @@ where
state: H2StreamState::Service(fut),
}
}
}
fn poll2(&mut self) -> Poll<(), crate::Error> {
impl<F, B> Future for H2Stream<F, B>
where
//F: Future<Item=Response<B>>,
//F::Error: Into<Box<dyn StdError + Send + Sync>>,
B: Payload,
{
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
unimplemented!("impl Future for H2Stream");
/*
loop {
let next = match self.state {
H2StreamState::Service(ref mut h) => {
@@ -292,9 +317,10 @@ where
};
self.state = next;
}
*/
}
}
/*
impl<F, B> Future for H2Stream<F, B>
where
F: Future<Item=Response<B>>,
@@ -309,4 +335,5 @@ where
.map_err(|e| debug!("stream error: {}", e))
}
}
*/