test(h1): re-enable the proto::h1::io tests

Sean McArthur
2019-07-16 14:03:01 -07:00
parent 9ae1873756
commit 1f6575279e
3 changed files with 84 additions and 345 deletions


@@ -19,300 +19,7 @@ use tokio_io::{AsyncRead, AsyncWrite};
#[cfg(feature = "runtime")]
use crate::client::connect::{Connect, Connected, Destination};
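// In-memory cursor backing the mock IO: writes append to `vec`, reads drain
// from `pos`, and the buffer resets once it has been fully consumed.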
#[derive(Debug)]
pub struct MockCursor {
    vec: Vec<u8>,
    pos: usize,
}

impl MockCursor {
    pub fn wrap(vec: Vec<u8>) -> MockCursor {
        MockCursor {
            vec: vec,
            pos: 0,
        }
    }
}

impl ::std::ops::Deref for MockCursor {
    type Target = [u8];

    fn deref(&self) -> &[u8] {
        &self.vec
    }
}

impl AsRef<[u8]> for MockCursor {
    fn as_ref(&self) -> &[u8] {
        &self.vec
    }
}

impl<S: AsRef<[u8]>> PartialEq<S> for MockCursor {
    fn eq(&self, other: &S) -> bool {
        self.vec == other.as_ref()
    }
}

impl Write for MockCursor {
    fn write(&mut self, data: &[u8]) -> io::Result<usize> {
        trace!("MockCursor::write; len={}", data.len());
        self.vec.extend(data);
        Ok(data.len())
    }

    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}

impl Read for MockCursor {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        (&self.vec[self.pos..]).read(buf).map(|n| {
            trace!("MockCursor::read; len={}", n);
            self.pos += n;
            if self.pos == self.vec.len() {
                trace!("MockCursor::read to end, clearing");
                self.pos = 0;
                self.vec.clear();
            }
            n
        })
    }
}
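// READ_VECS_CNT caps how many iovecs a single vectored write will gather.
// AsyncIo below wraps an inner Read/Write and lets only `bytes_until_block`
// bytes through before returning WouldBlock, so tests can simulate partial IO,
// injected errors, and parked tasks.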
const READ_VECS_CNT: usize = 64;

#[derive(Debug)]
pub struct AsyncIo<T> {
    blocked: bool,
    bytes_until_block: usize,
    error: Option<io::Error>,
    flushed: bool,
    inner: T,
    max_read_vecs: usize,
    num_writes: usize,
    panic: bool,
    park_tasks: bool,
    task: Option<Task>,
}

impl<T> AsyncIo<T> {
    pub fn new(inner: T, bytes: usize) -> AsyncIo<T> {
        AsyncIo {
            blocked: false,
            bytes_until_block: bytes,
            error: None,
            flushed: false,
            inner: inner,
            max_read_vecs: READ_VECS_CNT,
            num_writes: 0,
            panic: false,
            park_tasks: false,
            task: None,
        }
    }

    pub fn block_in(&mut self, bytes: usize) {
        self.bytes_until_block = bytes;
        if let Some(task) = self.task.take() {
            task.notify();
        }
    }

    pub fn error(&mut self, err: io::Error) {
        self.error = Some(err);
    }

    #[cfg(feature = "nightly")]
    pub fn panic(&mut self) {
        self.panic = true;
    }

    pub fn max_read_vecs(&mut self, cnt: usize) {
        assert!(cnt <= READ_VECS_CNT);
        self.max_read_vecs = cnt;
    }

    #[cfg(feature = "runtime")]
    pub fn park_tasks(&mut self, enabled: bool) {
        self.park_tasks = enabled;
    }

    /*
    pub fn flushed(&self) -> bool {
        self.flushed
    }
    */

    pub fn blocked(&self) -> bool {
        self.blocked
    }

    pub fn num_writes(&self) -> usize {
        self.num_writes
    }

    fn would_block(&mut self) -> io::Error {
        self.blocked = true;
        if self.park_tasks {
            self.task = Some(task::current());
        }
        io::ErrorKind::WouldBlock.into()
    }
}
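// Constructors and helpers for the common case of an AsyncIo over an
// in-memory MockCursor buffer, plus a scalar fallback used when vectored
// writes are disabled via max_read_vecs(0).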
impl AsyncIo<MockCursor> {
    pub fn new_buf<T: Into<Vec<u8>>>(buf: T, bytes: usize) -> AsyncIo<MockCursor> {
        AsyncIo::new(MockCursor::wrap(buf.into()), bytes)
    }

    /*
    pub fn new_eof() -> AsyncIo<Buf> {
        AsyncIo::new(Buf::wrap(Vec::new().into()), 1)
    }
    */

    #[cfg(feature = "runtime")]
    fn close(&mut self) {
        self.block_in(1);
        assert_eq!(
            self.inner.vec.len(),
            self.inner.pos,
            "AsyncIo::close(), but cursor not consumed",
        );
        self.inner.vec.truncate(0);
        self.inner.pos = 0;
    }
}

impl<T: Read + Write> AsyncIo<T> {
    fn write_no_vecs<B: Buf>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
        if !buf.has_remaining() {
            return Ok(Async::Ready(0));
        }

        let n = try_nb!(self.write(buf.bytes()));
        buf.advance(n);
        Ok(Async::Ready(n))
    }
}
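// Equality compares against the bytes written into the inner buffer; the
// blocking Read/Write impls below enforce the byte budget, moving at most
// `bytes_until_block` bytes per call before returning WouldBlock (and parking
// the current task if `park_tasks` is enabled).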
impl<S: AsRef<[u8]>, T: AsRef<[u8]>> PartialEq<S> for AsyncIo<T> {
    fn eq(&self, other: &S) -> bool {
        self.inner.as_ref() == other.as_ref()
    }
}

impl<T: Read> Read for AsyncIo<T> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        assert!(!self.panic, "AsyncIo::read panic");
        self.blocked = false;
        if let Some(err) = self.error.take() {
            Err(err)
        } else if self.bytes_until_block == 0 {
            Err(self.would_block())
        } else {
            let n = cmp::min(self.bytes_until_block, buf.len());
            let n = self.inner.read(&mut buf[..n])?;
            self.bytes_until_block -= n;
            Ok(n)
        }
    }
}

impl<T: Write> Write for AsyncIo<T> {
    fn write(&mut self, data: &[u8]) -> io::Result<usize> {
        assert!(!self.panic, "AsyncIo::write panic");
        self.num_writes += 1;
        if let Some(err) = self.error.take() {
            trace!("AsyncIo::write error");
            Err(err)
        } else if self.bytes_until_block == 0 {
            trace!("AsyncIo::write would block");
            Err(self.would_block())
        } else {
            trace!("AsyncIo::write; {} bytes", data.len());
            self.flushed = false;
            let n = cmp::min(self.bytes_until_block, data.len());
            let n = self.inner.write(&data[..n])?;
            self.bytes_until_block -= n;
            Ok(n)
        }
    }

    fn flush(&mut self) -> io::Result<()> {
        self.flushed = true;
        self.inner.flush()
    }
}
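// write_buf gathers up to `max_read_vecs` iovecs and writes them in one pass,
// counting the whole batch as a single write in `num_writes`.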
impl<T: Read + Write> AsyncRead for AsyncIo<T> {}

impl<T: Read + Write> AsyncWrite for AsyncIo<T> {
    fn shutdown(&mut self) -> Poll<(), io::Error> {
        Ok(().into())
    }

    fn write_buf<B: Buf>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
        assert!(!self.panic, "AsyncIo::write_buf panic");
        if self.max_read_vecs == 0 {
            return self.write_no_vecs(buf);
        }
        let r = {
            static DUMMY: &[u8] = &[0];
            let mut bufs = [From::from(DUMMY); READ_VECS_CNT];
            let i = Buf::bytes_vec(&buf, &mut bufs[..self.max_read_vecs]);
            let mut n = 0;
            let mut ret = Ok(0);
            // each call to write() will increase our count, but we assume
            // that if iovecs are used, it's really only 1 write call.
            let num_writes = self.num_writes;
            for iovec in &bufs[..i] {
                match self.write(iovec) {
                    Ok(num) => {
                        n += num;
                        ret = Ok(n);
                    },
                    Err(e) => {
                        if e.kind() == io::ErrorKind::WouldBlock {
                            if let Ok(0) = ret {
                                ret = Err(e);
                            }
                        } else {
                            ret = Err(e);
                        }
                        break;
                    }
                }
            }
            self.num_writes = num_writes + 1;
            ret
        };
        match r {
            Ok(n) => {
                Buf::advance(buf, n);
                Ok(Async::Ready(n))
            }
            Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
                Ok(Async::NotReady)
            }
            Err(e) => Err(e),
        }
    }
}

impl ::std::ops::Deref for AsyncIo<MockCursor> {
    type Target = [u8];

    fn deref(&self) -> &[u8] {
        &self.inner
    }
}
#[cfg(feature = "runtime")]
pub struct Duplex {