Update to Tokio 0.2 (#428)

This commit is contained in:
Sean McArthur
2019-11-27 14:53:57 -08:00
committed by GitHub
parent 37b66e8981
commit 4398e169e8
53 changed files with 473 additions and 972 deletions

View File

@@ -10,5 +10,5 @@ edition = "2018"
[dev-dependencies]
h2-support = { path = "../h2-support" }
log = "0.4.1"
futures-preview = "=0.3.0-alpha.19"
tokio = "=0.2.0-alpha.6"
futures = { version = "0.3", default-features = false, features = ["alloc"] }
tokio = { version = "0.2", features = ["macros", "tcp"] }

View File

@@ -418,7 +418,7 @@ async fn send_reset_notifies_recv_stream() {
// We don't want a join, since any of the other futures notifying
// will make the rx future polled again, but we are
// specifically testing that rx gets notified on its own.
let mut unordered = FuturesUnordered::<Pin<Box<dyn Future<Output = ()>>>>::new();
let unordered = FuturesUnordered::<Pin<Box<dyn Future<Output = ()>>>>::new();
unordered.push(Box::pin(rx));
unordered.push(Box::pin(tx));
@@ -754,7 +754,7 @@ async fn pending_send_request_gets_reset_by_peer_properly() {
let _ = env_logger::try_init();
let (io, mut srv) = mock::new();
let payload = vec![0; (frame::DEFAULT_INITIAL_WINDOW_SIZE * 2) as usize];
let payload = Bytes::from(vec![0; (frame::DEFAULT_INITIAL_WINDOW_SIZE * 2) as usize]);
let max_frame_size = frame::DEFAULT_MAX_FRAME_SIZE as usize;
let srv = async {
@@ -811,7 +811,7 @@ async fn pending_send_request_gets_reset_by_peer_properly() {
};
// Send the data
stream.send_data(payload[..].into(), true).unwrap();
stream.send_data(payload.clone(), true).unwrap();
conn.drive(response).await;
drop(client);
drop(stream);
@@ -897,7 +897,7 @@ async fn notify_on_send_capacity() {
// This test ensures that the client gets notified when there is additional
// send capacity. In other words, when the server is ready to accept a new
// stream, the client is notified.
use futures::channel::oneshot;
use tokio::sync::oneshot;
let _ = env_logger::try_init();
@@ -1016,13 +1016,14 @@ async fn send_stream_poll_reset() {
async fn drop_pending_open() {
// This test checks that a stream queued for pending open behaves correctly when its
// client drops.
use tokio::sync::oneshot;
let _ = env_logger::try_init();
let (io, mut srv) = mock::new();
let (init_tx, init_rx) = futures::channel::oneshot::channel();
let (trigger_go_away_tx, trigger_go_away_rx) = futures::channel::oneshot::channel();
let (sent_go_away_tx, sent_go_away_rx) = futures::channel::oneshot::channel();
let (drop_tx, drop_rx) = futures::channel::oneshot::channel();
let (init_tx, init_rx) = oneshot::channel();
let (trigger_go_away_tx, trigger_go_away_rx) = oneshot::channel();
let (sent_go_away_tx, sent_go_away_rx) = oneshot::channel();
let (drop_tx, drop_rx) = oneshot::channel();
let mut settings = frame::Settings::default();
settings.set_max_concurrent_streams(Some(2));
@@ -1103,11 +1104,12 @@ async fn malformed_response_headers_dont_unlink_stream() {
// This test checks that receiving malformed headers frame on a stream with
// no remaining references correctly resets the stream, without prematurely
// unlinking it.
use tokio::sync::oneshot;
let _ = env_logger::try_init();
let (io, mut srv) = mock::new();
let (drop_tx, drop_rx) = futures::channel::oneshot::channel();
let (queued_tx, queued_rx) = futures::channel::oneshot::channel();
let (drop_tx, drop_rx) = oneshot::channel();
let (queued_tx, queued_rx) = oneshot::channel();
let srv = async move {
let settings = srv.assert_client_handshake().await;

View File

@@ -175,8 +175,7 @@ async fn read_continuation_frames() {
let expected = large
.iter()
.fold(HeaderMap::new(), |mut map, &(name, ref value)| {
use h2_support::frames::HttpTryInto;
map.append(name, value.as_str().try_into().unwrap());
map.append(name, value.parse().unwrap());
map
});
assert_eq!(head.headers, expected);

View File

@@ -27,10 +27,10 @@ async fn write_continuation_frames() {
let (mut client, mut conn) = client::handshake(io).await.expect("handshake");
let mut request = Request::builder();
request.uri("https://http2.akamai.com/");
request = request.uri("https://http2.akamai.com/");
for &(name, ref value) in &large {
request.header(name, &value[..]);
request = request.header(name, &value[..]);
}
let request = request.body(()).unwrap();

View File

@@ -9,7 +9,7 @@ use h2_support::util::yield_once;
async fn send_data_without_requesting_capacity() {
let _ = env_logger::try_init();
let payload = [0; 1024];
let payload = vec![0; 1024];
let mock = mock_io::Builder::new()
.handshake()
@@ -42,7 +42,7 @@ async fn send_data_without_requesting_capacity() {
assert_eq!(stream.capacity(), 0);
// Send the data
stream.send_data(payload[..].into(), true).unwrap();
stream.send_data(payload.into(), true).unwrap();
// Get the response
let resp = h2.run(response).await.unwrap();
@@ -93,17 +93,17 @@ async fn release_capacity_sends_window_update() {
let mut body = resp.into_parts().1;
// read some body to use up window size to below half
let buf = body.next().await.unwrap().unwrap();
let buf = body.data().await.unwrap().unwrap();
assert_eq!(buf.len(), payload_len);
let buf = body.next().await.unwrap().unwrap();
let buf = body.data().await.unwrap().unwrap();
assert_eq!(buf.len(), payload_len);
let buf = body.next().await.unwrap().unwrap();
let buf = body.data().await.unwrap().unwrap();
assert_eq!(buf.len(), payload_len);
body.flow_control().release_capacity(buf.len() * 2).unwrap();
let buf = body.next().await.unwrap().unwrap();
let buf = body.data().await.unwrap().unwrap();
assert_eq!(buf.len(), payload_len);
};
@@ -153,11 +153,11 @@ async fn release_capacity_of_small_amount_does_not_send_window_update() {
assert_eq!(resp.status(), StatusCode::OK);
let mut body = resp.into_parts().1;
assert!(!body.is_end_stream());
let buf = body.next().await.unwrap().unwrap();
let buf = body.data().await.unwrap().unwrap();
// read the small body and then release it
assert_eq!(buf.len(), 16);
body.flow_control().release_capacity(buf.len()).unwrap();
let buf = body.next().await;
let buf = body.data().await;
assert!(buf.is_none());
};
join(async move { h2.await.unwrap() }, req).await;
@@ -213,7 +213,7 @@ async fn recv_data_overflows_connection_window() {
let resp = client.send_request(request, true).unwrap().0.await.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
let body = resp.into_parts().1;
let res = body.try_concat().await;
let res = util::concat(body).await;
let err = res.unwrap_err();
assert_eq!(
err.to_string(),
@@ -274,7 +274,7 @@ async fn recv_data_overflows_stream_window() {
let resp = client.send_request(request, true).unwrap().0.await.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
let body = resp.into_parts().1;
let res = body.try_concat().await;
let res = util::concat(body).await;
let err = res.unwrap_err();
assert_eq!(
err.to_string(),
@@ -685,8 +685,7 @@ async fn reserved_capacity_assigned_in_multi_window_updates() {
#[tokio::test]
async fn connection_notified_on_released_capacity() {
use futures::channel::mpsc;
use futures::channel::oneshot;
use tokio::sync::{mpsc, oneshot};
let _ = env_logger::try_init();
let (io, mut srv) = mock::new();
@@ -695,7 +694,7 @@ async fn connection_notified_on_released_capacity() {
// notifications. This test is here, in part, to ensure that the connection
// receives the appropriate notifications to send out window updates.
let (tx, mut rx) = mpsc::unbounded();
let (tx, mut rx) = mpsc::unbounded_channel();
// Because threading is fun
let (settings_tx, settings_rx) = oneshot::channel();
@@ -744,11 +743,11 @@ async fn connection_notified_on_released_capacity() {
h2.drive(settings_rx).await.unwrap();
let request = Request::get("https://example.com/a").body(()).unwrap();
tx.unbounded_send(client.send_request(request, true).unwrap().0)
tx.send(client.send_request(request, true).unwrap().0)
.unwrap();
let request = Request::get("https://example.com/b").body(()).unwrap();
tx.unbounded_send(client.send_request(request, true).unwrap().0)
tx.send(client.send_request(request, true).unwrap().0)
.unwrap();
tokio::spawn(async move {
@@ -760,8 +759,8 @@ async fn connection_notified_on_released_capacity() {
});
// Get the two requests
let a = rx.next().await.unwrap();
let b = rx.next().await.unwrap();
let a = rx.recv().await.unwrap();
let b = rx.recv().await.unwrap();
// Get the first response
let response = a.await.unwrap();
@@ -769,7 +768,7 @@ async fn connection_notified_on_released_capacity() {
let (_, mut a) = response.into_parts();
// Get the next chunk
let chunk = a.next().await.unwrap();
let chunk = a.data().await.unwrap();
assert_eq!(16_384, chunk.unwrap().len());
// Get the second response
@@ -778,7 +777,7 @@ async fn connection_notified_on_released_capacity() {
let (_, mut b) = response.into_parts();
// Get the next chunk
let chunk = b.next().await.unwrap();
let chunk = b.data().await.unwrap();
assert_eq!(16_384, chunk.unwrap().len());
// Wait a bit
@@ -944,7 +943,6 @@ async fn recv_no_init_window_then_receive_some_init_window() {
async fn settings_lowered_capacity_returns_capacity_to_connection() {
use futures::channel::oneshot;
use futures::future::{select, Either};
use std::time::Instant;
let _ = env_logger::try_init();
let (io, mut srv) = mock::new();
@@ -976,11 +974,7 @@ async fn settings_lowered_capacity_returns_capacity_to_connection() {
//
// A timeout is used here to avoid blocking forever if there is a
// failure
let result = select(
rx2,
tokio::timer::delay(Instant::now() + Duration::from_secs(5)),
)
.await;
let result = select(rx2, tokio::time::delay_for(Duration::from_secs(5))).await;
if let Either::Right((_, _)) = result {
panic!("Timed out");
}
@@ -1012,11 +1006,7 @@ async fn settings_lowered_capacity_returns_capacity_to_connection() {
});
// Wait for server handshake to complete.
let result = select(
rx1,
tokio::timer::delay(Instant::now() + Duration::from_secs(5)),
)
.await;
let result = select(rx1, tokio::time::delay_for(Duration::from_secs(5))).await;
if let Either::Right((_, _)) = result {
panic!("Timed out");
}
@@ -1113,7 +1103,7 @@ async fn increase_target_window_size_after_using_some() {
// drive an empty future to allow the WINDOW_UPDATE
// to go out while the response capacity is still in use.
conn.drive(yield_once()).await;
let _res = conn.drive(res.into_body().try_concat()).await;
let _res = conn.drive(util::concat(res.into_body())).await;
conn.await.expect("client");
};
@@ -1156,7 +1146,7 @@ async fn decrease_target_window_size() {
let mut body = res.into_parts().1;
let mut cap = body.flow_control().clone();
let bytes = conn.drive(body.try_concat()).await.expect("concat");
let bytes = conn.drive(util::concat(body)).await.expect("concat");
assert_eq!(bytes.len(), 65_535);
cap.release_capacity(bytes.len()).unwrap();
conn.await.expect("conn");
@@ -1568,7 +1558,7 @@ async fn data_padding() {
let resp = response.await.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
let body = resp.into_body();
let bytes = body.try_concat().await.unwrap();
let bytes = util::concat(body).await.unwrap();
assert_eq!(bytes.len(), 100);
};
join(async move { conn.await.expect("client") }, fut).await;

View File

@@ -26,8 +26,8 @@ impl Server {
{
let mk_data = Arc::new(mk_data);
let rt = tokio::runtime::Runtime::new().unwrap();
let listener = rt
let mut rt = tokio::runtime::Runtime::new().unwrap();
let mut listener = rt
.block_on(TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0))))
.unwrap();
let addr = listener.local_addr().unwrap();
@@ -35,8 +35,8 @@ impl Server {
let reqs2 = reqs.clone();
let join = thread::spawn(move || {
let server = async move {
let mut incoming = listener.incoming();
while let Some(socket) = incoming.next().await {
loop {
let socket = listener.accept().await.map(|(s, _)| s);
let reqs = reqs2.clone();
let mk_data = mk_data.clone();
tokio::spawn(async move {
@@ -140,7 +140,7 @@ fn hammer_client_concurrency() {
})
});
let rt = tokio::runtime::Runtime::new().unwrap();
let mut rt = tokio::runtime::Runtime::new().unwrap();
rt.block_on(tcp);
println!("...done");
}

View File

@@ -1,6 +1,6 @@
use futures::channel::oneshot;
use futures::future::join;
use futures::{StreamExt, TryStreamExt};
use futures::StreamExt;
use h2_support::assert_ping;
use h2_support::prelude::*;
@@ -84,7 +84,7 @@ async fn pong_has_highest_priority() {
assert_eq!(req.method(), "POST");
let body = req.into_parts().1;
let body = body.try_concat().await.expect("body");
let body = util::concat(body).await.expect("body");
assert_eq!(body.len(), data.len());
let res = Response::builder().status(200).body(()).unwrap();
stream.send_response(res, true).expect("response");

View File

@@ -8,7 +8,7 @@ use std::task::Context;
async fn single_stream_send_large_body() {
let _ = env_logger::try_init();
let payload = [0; 1024];
let payload = vec![0; 1024];
let mock = mock_io::Builder::new()
.handshake()
@@ -55,7 +55,7 @@ async fn single_stream_send_large_body() {
assert_eq!(stream.capacity(), payload.len());
// Send the data
stream.send_data(payload[..].into(), true).unwrap();
stream.send_data(payload.into(), true).unwrap();
// Get the response
let resp = h2.run(response).await.unwrap();
@@ -116,7 +116,7 @@ async fn multiple_streams_with_payload_greater_than_default_window() {
stream3.reserve_capacity(payload_clone.len());
assert_eq!(stream3.capacity(), 0);
stream1.send_data(payload_clone[..].into(), true).unwrap();
stream1.send_data(payload_clone.into(), true).unwrap();
// hold onto streams so they don't close
// stream1 doesn't close because response1 is used

View File

@@ -45,7 +45,7 @@ async fn recv_push_works() {
assert_eq!(request.into_parts().0.method, Method::GET);
let resp = response.await.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
let b = resp.into_body().try_concat().await.unwrap();
let b = util::concat(resp.into_body()).await.unwrap();
assert_eq!(b, "promised_data");
Ok(())
}

View File

@@ -1,7 +1,7 @@
#![deny(warnings)]
use futures::future::{join, poll_fn};
use futures::{StreamExt, TryStreamExt};
use futures::StreamExt;
use h2_support::prelude::*;
use tokio::io::AsyncWriteExt;
@@ -526,7 +526,7 @@ async fn abrupt_shutdown() {
let (req, tx) = srv.next().await.unwrap().expect("server receives request");
let req_fut = async move {
let body = req.into_body().try_concat().await;
let body = util::concat(req.into_body()).await;
drop(tx);
let err = body.expect_err("request body should error");
assert_eq!(
@@ -608,7 +608,7 @@ async fn graceful_shutdown() {
let body = req.into_parts().1;
let body = async move {
let buf = body.try_concat().await.unwrap();
let buf = util::concat(body).await.unwrap();
assert!(buf.is_empty());
let rsp = http::Response::builder().status(200).body(()).unwrap();

View File

@@ -5,6 +5,7 @@ use futures::{FutureExt, StreamExt, TryStreamExt};
use h2_support::prelude::*;
use h2_support::util::yield_once;
use std::task::Poll;
use tokio::sync::oneshot;
#[tokio::test]
async fn send_recv_headers_only() {
@@ -80,7 +81,7 @@ async fn send_recv_data() {
assert_eq!(stream.capacity(), 5);
// Send the data
stream.send_data("hello", true).unwrap();
stream.send_data("hello".as_bytes(), true).unwrap();
// Get the response
let resp = h2.run(response).await.unwrap();
@@ -204,7 +205,7 @@ async fn errors_if_recv_frame_exceeds_max_frame_size() {
let resp = client.get("https://example.com/").await.expect("response");
assert_eq!(resp.status(), StatusCode::OK);
let body = resp.into_parts().1;
let res = body.try_concat().await;
let res = util::concat(body).await;
let err = res.unwrap_err();
assert_eq!(err.to_string(), "protocol error: frame with invalid size");
};
@@ -252,7 +253,7 @@ async fn configure_max_frame_size() {
let resp = client.get("https://example.com/").await.expect("response");
assert_eq!(resp.status(), StatusCode::OK);
let body = resp.into_parts().1;
let buf = body.try_concat().await.expect("body");
let buf = util::concat(body).await.expect("body");
assert_eq!(buf.len(), 16_385);
};
@@ -313,7 +314,7 @@ async fn recv_goaway_finishes_processed_streams() {
.expect("response");
assert_eq!(resp.status(), StatusCode::OK);
let body = resp.into_parts().1;
let buf = body.try_concat().await.expect("body");
let buf = util::concat(body).await.expect("body");
assert_eq!(buf.len(), 16_384);
};
@@ -702,7 +703,7 @@ async fn rst_while_closing() {
let (io, mut srv) = mock::new();
// Rendezvous when we've queued a trailers frame
let (tx, rx) = crate::futures::channel::oneshot::channel();
let (tx, rx) = oneshot::channel();
let srv = async move {
let settings = srv.assert_client_handshake().await;
@@ -765,7 +766,7 @@ async fn rst_with_buffered_data() {
let (io, mut srv) = mock::new_with_write_capacity(73);
// Synchronize the client / server on response
let (tx, rx) = crate::futures::channel::oneshot::channel();
let (tx, rx) = oneshot::channel();
let srv = async move {
let settings = srv.assert_client_handshake().await;
@@ -817,7 +818,7 @@ async fn err_with_buffered_data() {
let (io, mut srv) = mock::new_with_write_capacity(73);
// Synchronize the client / server on response
let (tx, rx) = crate::futures::channel::oneshot::channel();
let (tx, rx) = oneshot::channel();
let srv = async move {
let settings = srv.assert_client_handshake().await;
@@ -872,7 +873,7 @@ async fn send_err_with_buffered_data() {
let (io, mut srv) = mock::new_with_write_capacity(73);
// Synchronize the client / server on response
let (tx, rx) = crate::futures::channel::oneshot::channel();
let (tx, rx) = oneshot::channel();
let srv = async move {
let settings = srv.assert_client_handshake().await;