refactor(lib): convert usage of tokio_core::io to tokio_io
This commit updates to the most recent versions (released today) of the various Tokio libraries in use. Namely, the `tokio_core::io` module has now been deprecated in favor of an external `tokio-io` crate. This commit pulls in that crate and uses the `AsyncRead + AsyncWrite` abstraction instead of `Io` from tokio-core.

BREAKING CHANGE: Any external types that had implemented `Io` will need to implement `AsyncRead + AsyncWrite` from tokio_io instead.
committed by Sean McArthur
parent 34509ef51a
commit 8554904dc9
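
For downstream code hit by the BREAKING CHANGE above, the migration is mechanical: keep the blocking `std::io::Read`/`Write` impls and swap the single `impl Io` for the two tokio-io traits. Below is a minimal sketch assuming tokio-io 0.1's trait shapes; `MockStream` is a hypothetical transport used only for illustration, not a type from hyper or tokio.

// Hypothetical example: migrating an external transport off tokio-core's `Io`.
extern crate futures;
extern crate tokio_io;

use std::io::{self, Cursor, Read, Write};

use futures::{Async, Poll};
use tokio_io::{AsyncRead, AsyncWrite};

// Stand-in for any type that previously wrote `impl Io for MockStream {}`.
struct MockStream {
    input: Cursor<Vec<u8>>,
    output: Vec<u8>,
}

impl Read for MockStream {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.input.read(buf)
    }
}

impl Write for MockStream {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.output.write(buf)
    }

    fn flush(&mut self) -> io::Result<()> {
        self.output.flush()
    }
}

// `AsyncRead` is satisfied by its default methods on top of `Read`;
// `AsyncWrite` additionally requires `shutdown`.
impl AsyncRead for MockStream {}

impl AsyncWrite for MockStream {
    fn shutdown(&mut self) -> Poll<(), io::Error> {
        Ok(Async::Ready(()))
    }
}

fn main() {
    let mut stream = MockStream {
        input: Cursor::new(b"ping".to_vec()),
        output: Vec::new(),
    };
    let mut buf = [0u8; 4];
    stream.read(&mut buf).expect("read");
    stream.write(&buf).expect("write");
    assert_eq!(stream.output, b"ping".to_vec());
}

The required `AsyncWrite::shutdown` is also what lets the new `Conn::close` in the diff below call `self.io.io_mut().shutdown()` on the underlying transport.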
@@ -5,7 +5,7 @@ use std::time::Instant;
 
 use futures::{Poll, Async, AsyncSink, Stream, Sink, StartSend};
 use futures::task::Task;
-use tokio::io::Io;
+use tokio_io::{AsyncRead, AsyncWrite};
 use tokio_proto::streaming::pipeline::{Frame, Transport};
 
 use header::{ContentLength, TransferEncoding};
@@ -16,7 +16,7 @@ use version::HttpVersion;
 
 
 /// This handles a connection, which will have been established over an
-/// `Io` (like a socket), and will likely include multiple
+/// `AsyncRead + AsyncWrite` (like a socket), and will likely include multiple
 /// `Transaction`s over HTTP.
 ///
 /// The connection will determine when a message begins and ends as well as
@@ -29,7 +29,7 @@ pub struct Conn<I, B, T, K = KA> {
 }
 
 impl<I, B, T, K> Conn<I, B, T, K>
-where I: Io,
+where I: AsyncRead + AsyncWrite,
       B: AsRef<[u8]>,
       T: Http1Transaction,
       K: KeepAlive
@@ -155,7 +155,7 @@ where I: Io,
     }
 
     fn maybe_park_read(&mut self) {
-        if self.io.poll_read().is_ready() {
+        if !self.io.is_read_blocked() {
             // the Io object is ready to read, which means it will never alert
             // us that it is ready until we drain it. However, we're currently
             // finished reading, so we need to park the task to be able to
@@ -350,7 +350,7 @@ where I: Io,
 }
 
 impl<I, B, T, K> Stream for Conn<I, B, T, K>
-where I: Io,
+where I: AsyncRead + AsyncWrite,
       B: AsRef<[u8]>,
       T: Http1Transaction,
       K: KeepAlive,
@@ -385,7 +385,7 @@ where I: Io,
 }
 
 impl<I, B, T, K> Sink for Conn<I, B, T, K>
-where I: Io,
+where I: AsyncRead + AsyncWrite,
       B: AsRef<[u8]>,
       T: Http1Transaction,
       K: KeepAlive,
@@ -450,10 +450,15 @@ where I: Io,
         trace!("Conn::flush = {:?}", ret);
         ret
     }
+
+    fn close(&mut self) -> Poll<(), Self::SinkError> {
+        try_ready!(self.poll_complete());
+        self.io.io_mut().shutdown()
+    }
 }
 
 impl<I, B, T, K> Transport for Conn<I, B, T, K>
-where I: Io + 'static,
+where I: AsyncRead + AsyncWrite + 'static,
       B: AsRef<[u8]> + 'static,
       T: Http1Transaction + 'static,
       K: KeepAlive + 'static,
@@ -665,6 +670,7 @@ impl<'a, T: fmt::Debug + 'a, B: AsRef<[u8]> + 'a> fmt::Debug for DebugFrame<'a,
 #[cfg(test)]
 mod tests {
     use futures::{Async, Future, Stream, Sink};
+    use futures::future;
     use tokio_proto::streaming::pipeline::Frame;
 
     use http::{self, MessageHead, ServerTransaction};
@@ -705,7 +711,7 @@ mod tests {
 
     #[test]
     fn test_conn_parse_partial() {
-        let _: Result<(), ()> = ::futures::lazy(|| {
+        let _: Result<(), ()> = future::lazy(|| {
             let good_message = b"GET / HTTP/1.1\r\nHost: foo.bar\r\n\r\n".to_vec();
             let io = AsyncIo::new_buf(good_message, 10);
             let mut conn = Conn::<_, http::Chunk, ServerTransaction>::new(io, Default::default());
@@ -772,7 +778,7 @@ mod tests {
 
     #[test]
     fn test_conn_body_write_length() {
-        let _: Result<(), ()> = ::futures::lazy(|| {
+        let _: Result<(), ()> = future::lazy(|| {
             let io = AsyncIo::new_buf(vec![], 0);
             let mut conn = Conn::<_, http::Chunk, ServerTransaction>::new(io, Default::default());
             let max = ::http::io::MAX_BUFFER_SIZE + 4096;
@@ -800,7 +806,7 @@ mod tests {
 
     #[test]
     fn test_conn_body_write_chunked() {
-        let _: Result<(), ()> = ::futures::lazy(|| {
+        let _: Result<(), ()> = future::lazy(|| {
             let io = AsyncIo::new_buf(vec![], 4096);
             let mut conn = Conn::<_, http::Chunk, ServerTransaction>::new(io, Default::default());
             conn.state.writing = Writing::Body(Encoder::chunked(), None);
@@ -813,7 +819,7 @@ mod tests {
 
     #[test]
     fn test_conn_body_flush() {
-        let _: Result<(), ()> = ::futures::lazy(|| {
+        let _: Result<(), ()> = future::lazy(|| {
             let io = AsyncIo::new_buf(vec![], 1024 * 1024 * 5);
             let mut conn = Conn::<_, http::Chunk, ServerTransaction>::new(io, Default::default());
             conn.state.writing = Writing::Body(Encoder::length(1024 * 1024), None);
@@ -829,7 +835,7 @@ mod tests {
     #[test]
     fn test_conn_parking() {
         use std::sync::Arc;
-        use futures::task::Unpark;
+        use futures::executor::Unpark;
 
         struct Car {
             permit: bool,
@@ -847,7 +853,7 @@ mod tests {
         }
 
         // test that once writing is done, unparks
-        let f = ::futures::lazy(|| {
+        let f = future::lazy(|| {
             let io = AsyncIo::new_buf(vec![], 4096);
             let mut conn = Conn::<_, http::Chunk, ServerTransaction>::new(io, Default::default());
             conn.state.reading = Reading::KeepAlive;
@@ -861,7 +867,7 @@ mod tests {
 
 
         // test that flushing when not waiting on read doesn't unpark
-        let f = ::futures::lazy(|| {
+        let f = future::lazy(|| {
             let io = AsyncIo::new_buf(vec![], 4096);
             let mut conn = Conn::<_, http::Chunk, ServerTransaction>::new(io, Default::default());
             conn.state.writing = Writing::KeepAlive;
@@ -872,7 +878,7 @@ mod tests {
 
 
        // test that flushing and writing isn't done doesn't unpark
-        let f = ::futures::lazy(|| {
+        let f = future::lazy(|| {
             let io = AsyncIo::new_buf(vec![], 4096);
             let mut conn = Conn::<_, http::Chunk, ServerTransaction>::new(io, Default::default());
             conn.state.reading = Reading::KeepAlive;
@@ -295,7 +295,7 @@ mod tests {
                 let (a, b) = self.split_at(n);
                 let mut buf = BytesMut::from(a);
                 *self = b;
-                Ok(buf.drain_to(n).freeze())
+                Ok(buf.split_to(n).freeze())
             } else {
                 Ok(Bytes::new())
             }
@@ -55,7 +55,7 @@ impl Http1Transaction for ServerTransaction {
         };
 
         let mut headers = Headers::with_capacity(headers_len);
-        let slice = buf.drain_to(len).freeze();
+        let slice = buf.split_to(len).freeze();
         let path = slice.slice(path.0, path.1);
         // path was found to be utf8 by httparse
         let path = unsafe { ByteStr::from_utf8_unchecked(path) };
@@ -171,7 +171,7 @@ impl Http1Transaction for ClientTransaction {
         };
 
         let mut headers = Headers::with_capacity(headers_len);
-        let slice = buf.drain_to(len).freeze();
+        let slice = buf.split_to(len).freeze();
         headers.extend(HeadersAsBytesIter {
             headers: headers_indices[..headers_len].iter(),
             slice: slice,
@@ -3,8 +3,7 @@ use std::fmt;
 use std::io::{self, Write};
 use std::ptr;
 
-use futures::Async;
-use tokio::io::Io;
+use tokio_io::{AsyncRead, AsyncWrite};
 
 use http::{Http1Transaction, h1, MessageHead, ParseResult, DebugTruncate};
 use bytes::{BytesMut, Bytes};
@@ -14,6 +13,7 @@ pub const MAX_BUFFER_SIZE: usize = 8192 + 4096 * 100;
 
 pub struct Buffered<T> {
     io: T,
+    read_blocked: bool,
     read_buf: BytesMut,
     write_buf: WriteBuf,
 }
@@ -27,12 +27,13 @@ impl<T> fmt::Debug for Buffered<T> {
     }
 }
 
-impl<T: Io> Buffered<T> {
+impl<T: AsyncRead + AsyncWrite> Buffered<T> {
     pub fn new(io: T) -> Buffered<T> {
         Buffered {
             io: io,
             read_buf: BytesMut::with_capacity(0),
             write_buf: WriteBuf::new(),
+            read_blocked: false,
         }
     }
 
@@ -49,14 +50,10 @@ impl<T: Io> Buffered<T> {
                     _ => break,
                 }
             }
-            self.read_buf.drain_to(i);
+            self.read_buf.split_to(i);
         }
     }
 
-    pub fn poll_read(&mut self) -> Async<()> {
-        self.io.poll_read()
-    }
-
     pub fn parse<S: Http1Transaction>(&mut self) -> ::Result<Option<MessageHead<S::Incoming>>> {
         self.reserve_read_buf();
         match self.read_from_io() {
@@ -88,8 +85,17 @@ impl<T: Io> Buffered<T> {
 
     fn read_from_io(&mut self) -> io::Result<usize> {
         use bytes::BufMut;
+        self.read_blocked = false;
         unsafe {
-            let n = try!(self.io.read(self.read_buf.bytes_mut()));
+            let n = match self.io.read(self.read_buf.bytes_mut()) {
+                Ok(n) => n,
+                Err(e) => {
+                    if e.kind() == io::ErrorKind::WouldBlock {
+                        self.read_blocked = true;
+                    }
+                    return Err(e)
+                }
+            };
             self.read_buf.advance_mut(n);
             Ok(n)
         }
@@ -112,10 +118,13 @@ impl<T: Io> Buffered<T> {
         self.write_buf.buffer(buf.as_ref())
     }
 
-    #[cfg(test)]
     pub fn io_mut(&mut self) -> &mut T {
         &mut self.io
     }
+
+    pub fn is_read_blocked(&self) -> bool {
+        self.read_blocked
+    }
 }
 
 impl<T: Write> Write for Buffered<T> {
@@ -146,17 +155,17 @@ pub trait MemRead {
     fn read_mem(&mut self, len: usize) -> io::Result<Bytes>;
 }
 
-impl<T: Io> MemRead for Buffered<T> {
+impl<T: AsyncRead + AsyncWrite> MemRead for Buffered<T> {
     fn read_mem(&mut self, len: usize) -> io::Result<Bytes> {
         trace!("Buffered.read_mem read_buf={}, wanted={}", self.read_buf.len(), len);
         if !self.read_buf.is_empty() {
             let n = ::std::cmp::min(len, self.read_buf.len());
             trace!("Buffered.read_mem read_buf is not empty, slicing {}", n);
-            Ok(self.read_buf.drain_to(n).freeze())
+            Ok(self.read_buf.split_to(n).freeze())
         } else {
             self.reserve_read_buf();
             let n = try!(self.read_from_io());
-            Ok(self.read_buf.drain_to(::std::cmp::min(len, n)).freeze())
+            Ok(self.read_buf.split_to(::std::cmp::min(len, n)).freeze())
         }
     }
 }