tests(client): make retryable request tests more reliable

This commit is contained in:
Sean McArthur
2018-03-05 15:05:24 -08:00
parent 49fcb0663c
commit 994bcd193c
4 changed files with 252 additions and 90 deletions

View File

@@ -36,6 +36,8 @@ mod dns;
mod pool;
#[cfg(feature = "compat")]
pub mod compat;
#[cfg(test)]
mod tests;
/// A Client to make outgoing HTTP requests.
pub struct Client<C, B = proto::Body> {

47
src/client/tests.rs Normal file
View File

@@ -0,0 +1,47 @@
extern crate pretty_env_logger;
use futures::Async;
use futures::future::poll_fn;
use tokio::reactor::Core;
use mock::MockConnector;
use super::*;
#[test]
fn retryable_request() {
    let _ = pretty_env_logger::try_init();
    let mut core = Core::new().unwrap();

    // Two mocked connections: the first request is served on sock1; after
    // sock1 is dropped (connection closed), the second request should end
    // up on sock2.
    let mut connector = MockConnector::new();
    let sock1 = connector.mock("http://mock.local/a");
    let sock2 = connector.mock("http://mock.local/b");

    let client = Client::configure()
        .connector(connector)
        .build(&core.handle());

    {
        // First request/response over sock1; the connection is left idle
        // afterwards.
        let res1 = client.get("http://mock.local/a".parse().unwrap());
        let srv1 = poll_fn(|| {
            // Play the server: consume the request bytes, then answer 200.
            try_ready!(sock1.read(&mut [0u8; 512]));
            try_ready!(sock1.write(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n"));
            Ok(Async::Ready(()))
        });
        core.run(res1.join(srv1)).expect("res1");
    }
    // Close the first mocked connection; per the test name, the client is
    // expected to notice the dead socket and retry on a new connection
    // (sock2) rather than fail the request.
    drop(sock1);

    let res2 = client.get("http://mock.local/b".parse().unwrap())
        .map(|res| {
            // Status 222 proves the response came from sock2, not sock1.
            assert_eq!(res.status().as_u16(), 222);
        });
    let srv2 = poll_fn(|| {
        try_ready!(sock2.read(&mut [0u8; 512]));
        try_ready!(sock2.write(b"HTTP/1.1 222 OK\r\nContent-Length: 0\r\n\r\n"));
        Ok(Async::Ready(()))
    });
    core.run(res2.join(srv2)).expect("res2");
}

View File

@@ -1,26 +1,33 @@
use std::cell::RefCell;
use std::collections::HashMap;
use std::cmp;
use std::io::{self, Read, Write};
use std::rc::Rc;
use bytes::Buf as BufTrait;
use bytes::Buf;
use futures::{Async, Poll};
use futures::task::{self, Task};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_service::Service;
use ::Uri;
#[derive(Debug)]
pub struct Buf {
pub struct MockCursor {
vec: Vec<u8>,
pos: usize,
}
impl Buf {
pub fn wrap(vec: Vec<u8>) -> Buf {
Buf {
impl MockCursor {
pub fn wrap(vec: Vec<u8>) -> MockCursor {
MockCursor {
vec: vec,
pos: 0,
}
}
}
impl ::std::ops::Deref for Buf {
impl ::std::ops::Deref for MockCursor {
type Target = [u8];
fn deref(&self) -> &[u8] {
@@ -28,19 +35,19 @@ impl ::std::ops::Deref for Buf {
}
}
impl AsRef<[u8]> for Buf {
impl AsRef<[u8]> for MockCursor {
fn as_ref(&self) -> &[u8] {
&self.vec
}
}
impl<S: AsRef<[u8]>> PartialEq<S> for Buf {
impl<S: AsRef<[u8]>> PartialEq<S> for MockCursor {
fn eq(&self, other: &S) -> bool {
self.vec == other.as_ref()
}
}
impl Write for Buf {
impl Write for MockCursor {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
self.vec.extend(data);
Ok(data.len())
@@ -51,7 +58,7 @@ impl Write for Buf {
}
}
impl Read for Buf {
impl Read for MockCursor {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
(&self.vec[self.pos..]).read(buf).map(|n| {
self.pos += n;
@@ -71,6 +78,8 @@ pub struct AsyncIo<T> {
inner: T,
max_read_vecs: usize,
num_writes: usize,
park_tasks: bool,
task: Option<Task>,
}
impl<T> AsyncIo<T> {
@@ -83,11 +92,17 @@ impl<T> AsyncIo<T> {
inner: inner,
max_read_vecs: READ_VECS_CNT,
num_writes: 0,
park_tasks: false,
task: None,
}
}
pub fn block_in(&mut self, bytes: usize) {
self.bytes_until_block = bytes;
if let Some(task) = self.task.take() {
task.notify();
}
}
pub fn error(&mut self, err: io::Error) {
@@ -99,6 +114,10 @@ impl<T> AsyncIo<T> {
self.max_read_vecs = cnt;
}
pub fn park_tasks(&mut self, enabled: bool) {
self.park_tasks = enabled;
}
#[cfg(feature = "tokio-proto")]
//TODO: fix proto::conn::tests to not use tokio-proto API,
//and then this cfg flag can go away
@@ -113,23 +132,39 @@ impl<T> AsyncIo<T> {
pub fn num_writes(&self) -> usize {
self.num_writes
}
fn would_block(&mut self) -> io::Error {
self.blocked = true;
if self.park_tasks {
self.task = Some(task::current());
}
io::ErrorKind::WouldBlock.into()
}
impl AsyncIo<Buf> {
pub fn new_buf<T: Into<Vec<u8>>>(buf: T, bytes: usize) -> AsyncIo<Buf> {
AsyncIo::new(Buf::wrap(buf.into()), bytes)
}
impl AsyncIo<MockCursor> {
pub fn new_buf<T: Into<Vec<u8>>>(buf: T, bytes: usize) -> AsyncIo<MockCursor> {
AsyncIo::new(MockCursor::wrap(buf.into()), bytes)
}
#[cfg(feature = "tokio-proto")]
//TODO: fix proto::conn::tests to not use tokio-proto API,
//and then this cfg flag can go away
pub fn new_eof() -> AsyncIo<Buf> {
AsyncIo::new(Buf::wrap(Vec::new().into()), 1)
pub fn new_eof() -> AsyncIo<MockCursor> {
AsyncIo::new(MockCursor::wrap(Vec::new().into()), 1)
}
fn close(&mut self) {
self.block_in(1);
assert_eq!(self.inner.vec.len(), self.inner.pos);
self.inner.vec.truncate(0);
self.inner.pos = 0;
}
}
impl<T: Read + Write> AsyncIo<T> {
fn write_no_vecs<B: BufTrait>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
fn write_no_vecs<B: Buf>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
if !buf.has_remaining() {
return Ok(Async::Ready(0));
}
@@ -153,8 +188,7 @@ impl<T: Read> Read for AsyncIo<T> {
if let Some(err) = self.error.take() {
Err(err)
} else if self.bytes_until_block == 0 {
self.blocked = true;
Err(io::ErrorKind::WouldBlock.into())
Err(self.would_block())
} else {
let n = cmp::min(self.bytes_until_block, buf.len());
let n = try!(self.inner.read(&mut buf[..n]));
@@ -168,11 +202,13 @@ impl<T: Write> Write for AsyncIo<T> {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
self.num_writes += 1;
if let Some(err) = self.error.take() {
trace!("AsyncIo::write error");
Err(err)
} else if self.bytes_until_block == 0 {
Err(io::ErrorKind::WouldBlock.into())
trace!("AsyncIo::write would block");
Err(self.would_block())
} else {
trace!("AsyncIo::write() block_in = {}, data.len() = {}", self.bytes_until_block, data.len());
trace!("AsyncIo::write; {} bytes", data.len());
self.flushed = false;
let n = cmp::min(self.bytes_until_block, data.len());
let n = try!(self.inner.write(&data[..n]));
@@ -195,14 +231,14 @@ impl<T: Read + Write> AsyncWrite for AsyncIo<T> {
Ok(().into())
}
fn write_buf<B: BufTrait>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
fn write_buf<B: Buf>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
if self.max_read_vecs == 0 {
return self.write_no_vecs(buf);
}
let r = {
static DUMMY: &[u8] = &[0];
let mut bufs = [From::from(DUMMY); READ_VECS_CNT];
let i = ::bytes::Buf::bytes_vec(&buf, &mut bufs[..self.max_read_vecs]);
let i = Buf::bytes_vec(&buf, &mut bufs[..self.max_read_vecs]);
let mut n = 0;
let mut ret = Ok(0);
// each call to write() will increase our count, but we assume
@@ -231,7 +267,7 @@ impl<T: Read + Write> AsyncWrite for AsyncIo<T> {
};
match r {
Ok(n) => {
::bytes::Buf::advance(buf, n);
Buf::advance(buf, n);
Ok(Async::Ready(n))
}
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
@@ -242,10 +278,153 @@ impl<T: Read + Write> AsyncWrite for AsyncIo<T> {
}
}
impl ::std::ops::Deref for AsyncIo<Buf> {
impl ::std::ops::Deref for AsyncIo<MockCursor> {
type Target = [u8];
fn deref(&self) -> &[u8] {
&self.inner
}
}
/// Client-facing end of an in-memory mock connection.
///
/// Reads and writes are forwarded to the shared `DuplexInner` state, which
/// is also reachable from the paired test-side `DuplexHandle`.
pub struct Duplex {
    // Shared with the DuplexHandle created alongside this value in
    // MockConnector::mock; single-threaded use, hence Rc<RefCell<..>>.
    inner: Rc<RefCell<DuplexInner>>,
}
/// State shared between a `Duplex` (client side) and its `DuplexHandle`
/// (test side).
struct DuplexInner {
    // Task parked by DuplexHandle::read while there is nothing to read;
    // notified again when the client side writes.
    handle_read_task: Option<Task>,
    // Bytes flowing handle -> client: the client's reads come from here.
    read: AsyncIo<MockCursor>,
    // Bytes flowing client -> handle: the client's writes land here.
    write: AsyncIo<MockCursor>,
}
impl Read for Duplex {
    /// Forwards the read to the client-facing half of the shared state.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let mut shared = self.inner.borrow_mut();
        shared.read.read(buf)
    }
}
impl Write for Duplex {
    /// Appends to the handle-facing buffer, first waking a `DuplexHandle`
    /// reader that parked itself waiting for data.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let mut shared = self.inner.borrow_mut();
        if let Some(task) = shared.handle_read_task.take() {
            trace!("waking DuplexHandle read");
            task.notify();
        }
        shared.write.write(buf)
    }

    fn flush(&mut self) -> io::Result<()> {
        let mut shared = self.inner.borrow_mut();
        shared.write.flush()
    }
}
// Marker impl: AsyncRead's default methods delegate to the Read impl above.
impl AsyncRead for Duplex {
}
impl AsyncWrite for Duplex {
fn shutdown(&mut self) -> Poll<(), io::Error> {
Ok(().into())
}
fn write_buf<B: Buf>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
if let Some(task) = self.inner.borrow_mut().handle_read_task.take() {
task.notify();
}
self.inner.borrow_mut().write.write_buf(buf)
}
}
/// Test-side ("server") handle to a mocked connection.
///
/// Lets a test read what the client wrote and push bytes for the client to
/// read. Dropping the handle closes both directions.
pub struct DuplexHandle {
    inner: Rc<RefCell<DuplexInner>>,
}
impl DuplexHandle {
    /// Reads everything the client has written so far into `buf`,
    /// returning the number of bytes copied.
    ///
    /// Parks the current task (returning `NotReady`) when nothing is
    /// buffered yet; the client's `write` wakes it up.
    ///
    /// # Panics
    ///
    /// Panics if `buf` is smaller than the buffered data.
    pub fn read(&self, buf: &mut [u8]) -> Poll<usize, io::Error> {
        let mut inner = self.inner.borrow_mut();
        assert!(buf.len() >= inner.write.inner.len());
        if inner.write.inner.is_empty() {
            trace!("DuplexHandle read parking");
            inner.handle_read_task = Some(task::current());
            return Ok(Async::NotReady);
        }
        // Capture the count and copy the bytes out *before* truncating.
        // The previous code truncated first and then returned
        // `inner.write.inner.len()`, which was always 0, and never copied
        // anything into `buf` at all.
        let n = inner.write.inner.len();
        buf[..n].copy_from_slice(&inner.write.inner[..n]);
        inner.write.inner.vec.truncate(0);
        Ok(Async::Ready(n))
    }

    /// Queues `bytes` for the client to read and unblocks its read side.
    ///
    /// # Panics
    ///
    /// Panics if the client has not yet consumed the previously queued
    /// bytes (the read buffer must be empty and fully drained).
    pub fn write(&self, bytes: &[u8]) -> Poll<usize, io::Error> {
        let mut inner = self.inner.borrow_mut();
        assert!(inner.read.inner.vec.is_empty());
        assert_eq!(inner.read.inner.pos, 0);
        inner.read.inner.vec.extend(bytes);
        // Allow the client to read exactly this many bytes before it
        // would block again (also notifies a parked client task).
        inner.read.block_in(bytes.len());
        Ok(Async::Ready(bytes.len()))
    }
}
impl Drop for DuplexHandle {
    /// Closes both directions of the mocked connection when the test-side
    /// handle goes away.
    ///
    /// `AsyncIo::close` also asserts that every buffered byte was consumed
    /// (vec length == read position), so dropping a handle with unread
    /// data panics the test.
    fn drop(&mut self) {
        trace!("mock duplex handle drop");
        let mut inner = self.inner.borrow_mut();
        inner.read.close();
        inner.write.close();
    }
}
/// A test connector that hands out pre-registered in-memory connections
/// instead of opening real sockets.
pub struct MockConnector {
    // Queue of mocked client halves keyed by URI string; the Service impl
    // pops them in FIFO order (remove(0)).
    mocks: RefCell<HashMap<String, Vec<Duplex>>>,
}
impl MockConnector {
    /// Creates a connector with no mocked connections registered.
    pub fn new() -> MockConnector {
        MockConnector {
            mocks: RefCell::new(HashMap::new()),
        }
    }

    /// Registers a mocked connection for `key` (a URI string) and returns
    /// the test-side handle for it.
    ///
    /// The client half is queued and handed out when the `Service` impl is
    /// asked to connect to the same URI.
    pub fn mock(&mut self, key: &str) -> DuplexHandle {
        let key = key.to_owned();

        let mut inner = DuplexInner {
            handle_read_task: None,
            // Client reads start blocked (0 readable bytes) until the
            // handle writes something; client writes are unbounded.
            read: AsyncIo::new_buf(Vec::new(), 0),
            write: AsyncIo::new_buf(Vec::new(), ::std::usize::MAX),
        };
        // Park blocked tasks so DuplexHandle::write / block_in can wake
        // the client side again.
        inner.read.park_tasks(true);
        inner.write.park_tasks(true);

        let inner = Rc::new(RefCell::new(inner));

        let duplex = Duplex {
            inner: inner.clone(),
        };
        let handle = DuplexHandle {
            inner: inner,
        };

        // or_insert_with: don't allocate a fresh Vec when the entry
        // already exists (or_insert(Vec::new()) builds one eagerly).
        self.mocks.borrow_mut().entry(key)
            .or_insert_with(Vec::new)
            .push(duplex);

        handle
    }
}
impl Service for MockConnector {
    type Request = Uri;
    type Response = Duplex;
    type Error = io::Error;
    type Future = ::futures::future::FutureResult<Self::Response, Self::Error>;

    /// Hands out the oldest mocked connection registered for `uri`.
    ///
    /// # Panics
    ///
    /// Panics when no mock was registered for the URI — that is a bug in
    /// the test itself, not a runtime error to report.
    fn call(&self, uri: Uri) -> Self::Future {
        use futures::future;
        trace!("mock connect: {:?}", uri.as_ref());
        let mock = self.mocks.borrow_mut()
            .get_mut(uri.as_ref())
            // unwrap_or_else: `.expect(&format!(..))` formatted the panic
            // message on every successful call; build it lazily instead.
            // The message text itself is unchanged.
            .unwrap_or_else(|| panic!("unknown mocks uri: {:?}", uri.as_ref()))
            .remove(0);
        future::ok(mock)
    }
}

View File

@@ -584,72 +584,6 @@ fn client_keep_alive() {
core.run(res.join(rx).map(|r| r.0)).unwrap();
}
// Regression test: a kept-alive connection is reset by the server between
// requests; the client must notice and the second request must still
// succeed (on a new connection) within the timeout.
#[test]
fn client_keep_alive_connreset() {
    use std::sync::mpsc;
    extern crate pretty_env_logger;
    let _ = pretty_env_logger::try_init();
    let server = TcpListener::bind("127.0.0.1:0").unwrap();
    let addr = server.local_addr().unwrap();
    let mut core = Core::new().unwrap();
    let handle = core.handle();
    let client = client(&handle);

    // tx1/rx1: server thread -> test, "connection has been shut down".
    // tx2/rx2: test -> server thread, "first response fully processed".
    let (tx1, rx1) = oneshot::channel();
    let (tx2, rx2) = mpsc::channel();

    thread::spawn(move || {
        // First connection: serve one 200 response.
        let mut sock = server.accept().unwrap().0;
        sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
        sock.set_write_timeout(Some(Duration::from_secs(5))).unwrap();
        let mut buf = [0; 4096];
        sock.read(&mut buf).expect("read 1");
        sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n").expect("write 1");

        // Wait for client to indicate it is done processing the first request.
        // This is what seems to trigger the race condition -- without it the
        // client notices the connection is closed while still processing the
        // first request.
        let _ = rx2.recv();

        // Reset the kept-alive connection under the client's feet.
        let _r = sock.shutdown(std::net::Shutdown::Both);

        // Let client know it can try to reuse the connection.
        let _ = tx1.send(());

        // Second connection: answer with 222 so the test can tell the
        // responses apart. Use sock2 so that sock isn't dropped yet.
        let mut sock2 = server.accept().unwrap().0;
        sock2.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
        sock2.set_write_timeout(Some(Duration::from_secs(5))).unwrap();
        let mut buf = [0; 4096];
        sock2.read(&mut buf).expect("read 2");
        sock2.write_all(b"HTTP/1.1 222 OK\r\nContent-Length: 0\r\n\r\n").expect("write 2");
    });

    // First request over the pooled connection.
    let res = client.get(format!("http://{}/a", addr).parse().unwrap());
    core.run(res).unwrap();

    // Signal the server thread, then wait until it has shut the socket down.
    let _ = tx2.send(());
    let rx = rx1.map_err(|_| hyper::Error::Io(io::Error::new(io::ErrorKind::Other, "thread panicked")));
    core.run(rx).unwrap();

    // Second request must complete within 100ms or the test fails with a
    // timeout error instead of hanging.
    let t = Timeout::new(Duration::from_millis(100), &handle).unwrap();
    let res = client.get(format!("http://{}/b", addr).parse().unwrap())
        .map(|res| {
            // 222 proves this response came from the second connection.
            assert_eq!(res.status().as_u16(), 222);
        });
    let fut = res.select2(t).then(|result| match result {
        Ok(Either::A((resp, _))) => Ok(resp),
        Err(Either::A((err, _))) => Err(err),
        Ok(Either::B(_)) |
        Err(Either::B(_)) => Err(hyper::Error::Timeout),
    });
    core.run(fut).expect("req 2");
}
#[test]
fn client_keep_alive_extra_body() {
let _ = pretty_env_logger::try_init();