Avoid reclaiming frames for dead streams. (#262)

In `clear_queue` we drop all the queued frames for a stream, but this doesn't
account for a frame that may still be buffered inside the `FramedWrite`. This
can lead to a panic when `reclaim_frame` tries to recover a frame onto a stream
that has already been destroyed, or, more generally, to incorrect behaviour.

Instead, let's keep track of which frame is currently in flight; then, when we
`clear_queue` a stream with an in-flight data frame, mark the frame to be
dropped instead of reclaimed.
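
The shape of the fix, as a minimal sketch (the enum and its names below are
illustrative assumptions, not necessarily the exact h2 internals):

// Sketch only: track whom the single frame buffered in `FramedWrite` belongs to.
#[derive(Debug)]
enum InFlightData<K> {
    /// No data frame is currently buffered in the `FramedWrite`.
    Nothing,
    /// A data frame for the stream keyed by `K` is buffered and may be reclaimed.
    DataFrame(K),
    /// The owning stream was cleared; the frame must be dropped, not reclaimed.
    Drop,
}

Under this scheme, `clear_queue` checks whether the in-flight frame belongs to
the stream being cleared and, if so, moves `DataFrame(key)` to `Drop`;
`reclaim_frame` only pushes the recovered frame back onto the stream while the
state is still `DataFrame(key)`, discarding it and resetting to `Nothing`
otherwise.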
Author: Geoffry Song
Date:   2018-04-24 16:52:24 -07:00
Committer: Carl Lerche
Parent: 11f914150e
Commit: 558e6b6e6c
3 changed files with 203 additions and 5 deletions


@@ -878,3 +878,70 @@ fn rst_while_closing() {
    client.join(srv).wait().expect("wait");
}

#[test]
fn rst_with_buffered_data() {
    use futures::future::lazy;

    // Data is buffered in `FramedWrite` and the stream is reset locally before
    // the data is fully flushed. Given that resetting a stream requires
    // clearing all associated state for that stream, this test ensures that the
    // buffered up frame is correctly handled.
    let _ = ::env_logger::try_init();

    // This allows the settings + headers frame through
    let (io, srv) = mock::new_with_write_capacity(73);

    // Synchronize the client / server on response
    let (tx, rx) = ::futures::sync::oneshot::channel();

    let srv = srv.assert_client_handshake()
        .unwrap()
        .recv_settings()
        .recv_frame(
            frames::headers(1)
                .request("POST", "https://example.com/")
        )
        .buffer_bytes(128)
        .send_frame(frames::headers(1).response(204).eos())
        .send_frame(frames::reset(1).cancel())
        .wait_for(rx)
        .unbounded_bytes()
        .recv_frame(frames::data(1, vec![0; 16_384]))
        .close();

    // A large body
    let body = vec![0; 2 * frame::DEFAULT_INITIAL_WINDOW_SIZE as usize];

    let client = client::handshake(io)
        .expect("handshake")
        .and_then(|(mut client, conn)| {
            let request = Request::builder()
                .method(Method::POST)
                .uri("https://example.com/")
                .body(())
                .unwrap();

            // Send the request
            let (resp, mut stream) = client.send_request(request, false)
                .expect("send_request");

            // Send the data
            stream.send_data(body.into(), true).unwrap();

            conn.drive(resp.then(|_res| Ok::<_, ()>(())))
        })
        .and_then(move |(conn, _)| {
            tx.send(()).unwrap();
            conn.unwrap()
        });

    client.join(srv).wait().expect("wait");
}


@@ -10,7 +10,7 @@ use futures::task::{self, Task};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_io::io::read_exact;
-use std::{cmp, fmt, io};
+use std::{cmp, fmt, io, usize};
use std::io::ErrorKind::WouldBlock;
use std::sync::{Arc, Mutex};
@@ -32,10 +32,25 @@ pub struct Pipe {
#[derive(Debug)]
struct Inner {
    /// Data written by the test case to the h2 lib.
    rx: Vec<u8>,

    /// Notify when data is ready to be received.
    rx_task: Option<Task>,

    /// Data written by the `h2` library to be read by the test case.
    tx: Vec<u8>,

    /// Notify when data is written. This notifies the test case waiters.
    tx_task: Option<Task>,

    /// Number of bytes that can be written before `write` returns `NotReady`.
    tx_rem: usize,

    /// Task to notify when write capacity becomes available.
    tx_rem_task: Option<Task>,

    /// True when the pipe is closed.
    closed: bool,
}
@@ -43,11 +58,18 @@ const PREFACE: &'static [u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
/// Create a new mock and handle
pub fn new() -> (Mock, Handle) {
    new_with_write_capacity(usize::MAX)
}

/// Create a new mock and handle allowing up to `cap` bytes to be written.
pub fn new_with_write_capacity(cap: usize) -> (Mock, Handle) {
    let inner = Arc::new(Mutex::new(Inner {
        rx: vec![],
        rx_task: None,
        tx: vec![],
        tx_task: None,
        tx_rem: cap,
        tx_rem_task: None,
        closed: false,
    }));
@@ -303,14 +325,24 @@ impl io::Read for Mock {
impl AsyncRead for Mock {}

impl io::Write for Mock {
-    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+    fn write(&mut self, mut buf: &[u8]) -> io::Result<usize> {
        let mut me = self.pipe.inner.lock().unwrap();

        if me.closed {
            return Err(io::Error::new(io::ErrorKind::BrokenPipe, "mock closed"));
        }

        if me.tx_rem == 0 {
            me.tx_rem_task = Some(task::current());
            return Err(io::ErrorKind::WouldBlock.into());
        }

        if buf.len() > me.tx_rem {
            buf = &buf[..me.tx_rem];
        }

        me.tx.extend(buf);
        me.tx_rem -= buf.len();

        if let Some(task) = me.tx_task.take() {
            task.notify();
@@ -477,6 +509,70 @@ pub trait HandleFutureExt {
        }))
    }

    fn buffer_bytes(self, num: usize) -> Box<Future<Item = Handle, Error = Self::Error>>
        where Self: Sized + 'static,
              Self: Future<Item = Handle>,
              Self::Error: fmt::Debug,
    {
        use futures::future::poll_fn;

        Box::new(self.and_then(move |mut handle| {
            // Limit the mock's write capacity to `num` bytes.
            {
                let mut i = handle.codec.get_mut().inner.lock().unwrap();
                i.tx_rem = num;
            }

            let mut handle = Some(handle);

            poll_fn(move || {
                {
                    let mut inner = handle.as_mut().unwrap()
                        .codec.get_mut().inner.lock().unwrap();

                    if inner.tx_rem == 0 {
                        // All `num` bytes have been written; lift the cap.
                        inner.tx_rem = usize::MAX;
                    } else {
                        // Wait to be notified by the next write.
                        inner.tx_task = Some(task::current());
                        return Ok(Async::NotReady);
                    }
                }

                Ok(handle.take().unwrap().into())
            })
        }))
    }

    fn unbounded_bytes(self) -> Box<Future<Item = Handle, Error = Self::Error>>
        where Self: Sized + 'static,
              Self: Future<Item = Handle>,
              Self::Error: fmt::Debug,
    {
        Box::new(self.and_then(|mut handle| {
            {
                let mut i = handle.codec.get_mut().inner.lock().unwrap();
                i.tx_rem = usize::MAX;

                // Wake a writer that blocked on exhausted capacity.
                if let Some(task) = i.tx_rem_task.take() {
                    task.notify();
                }
            }

            Ok(handle.into())
        }))
    }

    fn then_notify(self, tx: oneshot::Sender<()>) -> Box<Future<Item = Handle, Error = Self::Error>>
        where Self: Sized + 'static,
              Self: Future<Item = Handle>,
              Self::Error: fmt::Debug,
    {
        Box::new(self.map(move |handle| {
            tx.send(()).unwrap();
            handle
        }))
    }

    fn wait_for<F>(self, other: F) -> Box<Future<Item = Self::Item, Error = Self::Error>>
    where
        F: Future + 'static,