feat(body): add body::aggregate and body::to_bytes functions
Adds utility functions to `hyper::body` that asynchronously collect all the buffers of some `HttpBody` into one:

- `aggregate` collects everything into an `impl Buf` without copying the contents. This is ideal if you don't need a contiguous buffer.
- `to_bytes` copies all the data into a single contiguous `Bytes` buffer.
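A minimal usage sketch of the two helpers, assuming a hyper 0.13-era client, an already-parsed `url: hyper::Uri`, and a boxed error type for brevity (the `demo` function and its signature are illustrative, not part of this commit):

```rust
use bytes::Buf;
use hyper::Client;

async fn demo(url: hyper::Uri) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let client = Client::new();

    // `aggregate`: group the body's buffers into one `impl Buf` without copying.
    let res = client.get(url.clone()).await?;
    let buf = hyper::body::aggregate(res.into_body()).await?;
    println!("aggregated {} bytes (non-contiguous)", buf.remaining());

    // `to_bytes`: copy the body into a single contiguous `Bytes`.
    let res = client.get(url).await?;
    let bytes = hyper::body::to_bytes(res.into_body()).await?;
    println!("collected {} bytes (contiguous)", bytes.len());

    Ok(())
}
```

Prefer `aggregate` when the data will be consumed through the `Buf` interface anyway; reach for `to_bytes` only when a single contiguous slice is actually required.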
@@ -98,7 +98,7 @@ required-features = ["runtime"]
[[example]]
name = "client_json"
path = "examples/client_json.rs"
required-features = ["runtime", "stream"]
required-features = ["runtime"]

[[example]]
name = "echo"
@@ -162,6 +162,11 @@ path = "examples/web_api.rs"
required-features = ["runtime", "stream"]

[[bench]]
name = "body"
path = "benches/body.rs"
required-features = ["runtime", "stream"]

[[bench]]
name = "connect"
path = "benches/connect.rs"
benches/body.rs (new file, 89 lines)
@@ -0,0 +1,89 @@
#![feature(test)]
#![deny(warnings)]

extern crate test;

use bytes::Buf;
use futures_util::stream;
use futures_util::StreamExt;
use hyper::body::Body;

macro_rules! bench_stream {
    ($bencher:ident, bytes: $bytes:expr, count: $count:expr, $total_ident:ident, $body_pat:pat, $block:expr) => {{
        let mut rt = tokio::runtime::Builder::new()
            .basic_scheduler()
            .build()
            .expect("rt build");

        let $total_ident: usize = $bytes * $count;
        $bencher.bytes = $total_ident as u64;
        let __s: &'static [&'static [u8]] = &[&[b'x'; $bytes] as &[u8]; $count] as _;

        $bencher.iter(|| {
            rt.block_on(async {
                let $body_pat = Body::wrap_stream(
                    stream::iter(__s.iter()).map(|&s| Ok::<_, std::convert::Infallible>(s)),
                );
                $block;
            });
        });
    }};
}

macro_rules! benches {
    ($($name:ident, $bytes:expr, $count:expr;)+) => (
        mod aggregate {
            use super::*;

            $(
                #[bench]
                fn $name(b: &mut test::Bencher) {
                    bench_stream!(b, bytes: $bytes, count: $count, total, body, {
                        let buf = hyper::body::aggregate(body).await.unwrap();
                        assert_eq!(buf.remaining(), total);
                    });
                }
            )+
        }

        mod manual_into_vec {
            use super::*;

            $(
                #[bench]
                fn $name(b: &mut test::Bencher) {
                    bench_stream!(b, bytes: $bytes, count: $count, total, mut body, {
                        let mut vec = Vec::new();
                        while let Some(chunk) = body.next().await {
                            vec.extend_from_slice(&chunk.unwrap());
                        }
                        assert_eq!(vec.len(), total);
                    });
                }
            )+
        }

        mod to_bytes {
            use super::*;

            $(
                #[bench]
                fn $name(b: &mut test::Bencher) {
                    bench_stream!(b, bytes: $bytes, count: $count, total, body, {
                        let bytes = hyper::body::to_bytes(body).await.unwrap();
                        assert_eq!(bytes.len(), total);
                    });
                }
            )+
        }
    )
}

// ===== Actual Benchmarks =====

benches! {
    bytes_1_000_count_2, 1_000, 2;
    bytes_1_000_count_10, 1_000, 10;
    bytes_10_000_count_1, 10_000, 1;
    bytes_10_000_count_10, 10_000, 10;
}
@@ -40,6 +40,8 @@ async fn fetch_url(url: hyper::Uri) -> Result<()> {
    println!("Response: {}", res.status());
    println!("Headers: {:#?}\n", res.headers());

    // Stream the body, writing each chunk to stdout as we get it
    // (instead of buffering and printing at the end).
    while let Some(next) = res.body_mut().data().await {
        let chunk = next?;
        io::stdout().write_all(&chunk).await?;
@@ -4,7 +4,7 @@
#[macro_use]
extern crate serde_derive;

use futures_util::StreamExt;
use bytes::buf::BufExt as _;
use hyper::Client;

// A simple type alias so as to DRY.
@@ -27,14 +27,13 @@ async fn fetch_json(url: hyper::Uri) -> Result<Vec<User>> {
    let client = Client::new();

    // Fetch the url...
    let mut res = client.get(url).await?;
    // asynchronously concatenate chunks of the body
    let mut body = Vec::new();
    while let Some(chunk) = res.body_mut().next().await {
        body.extend_from_slice(&chunk?);
    }
    let res = client.get(url).await?;

    // asynchronously aggregate the chunks of the body
    let body = hyper::body::aggregate(res.into_body()).await?;

    // try to parse as json with serde_json
    let users = serde_json::from_slice(&body)?;
    let users = serde_json::from_reader(body.reader())?;

    Ok(users)
}
@@ -1,12 +1,12 @@
//#![deny(warnings)]
#![deny(warnings)]

use futures_util::{StreamExt, TryStreamExt};
use futures_util::TryStreamExt;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Method, Request, Response, Server, StatusCode};

/// This is our service handler. It receives a Request, routes on its
/// path, and returns a Future of a Response.
async fn echo(mut req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
async fn echo(req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
    match (req.method(), req.uri().path()) {
        // Serve some instructions at /
        (&Method::GET, "/") => Ok(Response::new(Body::from(
@@ -34,10 +34,7 @@ async fn echo(mut req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
        // So here we do `.await` on the future, waiting on concatenating the full body,
        // then afterwards the content can be reversed. Only then can we return a `Response`.
        (&Method::POST, "/echo/reversed") => {
            let mut whole_body = Vec::new();
            while let Some(chunk) = req.body_mut().next().await {
                whole_body.extend_from_slice(&chunk?);
            }
            let whole_body = hyper::body::to_bytes(req.into_body()).await?;

            let reversed_body = whole_body.iter().rev().cloned().collect::<Vec<u8>>();
            Ok(Response::new(Body::from(reversed_body)))
@@ -4,7 +4,6 @@
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Method, Request, Response, Server, StatusCode};

use futures_util::StreamExt;
use std::collections::HashMap;
use url::form_urlencoded;

@@ -13,15 +12,12 @@ static MISSING: &[u8] = b"Missing field";
static NOTNUMERIC: &[u8] = b"Number field is not numeric";

// Using service_fn, we can turn this function into a `Service`.
async fn param_example(mut req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
async fn param_example(req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
    match (req.method(), req.uri().path()) {
        (&Method::GET, "/") | (&Method::GET, "/post") => Ok(Response::new(INDEX.into())),
        (&Method::POST, "/post") => {
            // Concatenate the body...
            let mut b = Vec::new();
            while let Some(chunk) = req.body_mut().next().await {
                b.extend_from_slice(&chunk?);
            }
            let b = hyper::body::to_bytes(req.into_body()).await?;
            // Parse the request body. form_urlencoded::parse
            // always succeeds, but in general parsing may
            // fail (for example, an invalid post of json), so
@@ -1,6 +1,7 @@
#![deny(warnings)]

use futures_util::{StreamExt, TryStreamExt};
use bytes::buf::BufExt;
use futures_util::{stream, StreamExt};
use hyper::client::HttpConnector;
use hyper::service::{make_service_fn, service_fn};
use hyper::{header, Body, Client, Method, Request, Response, Server, StatusCode};
@@ -24,25 +25,24 @@ async fn client_request_response(client: &Client<HttpConnector>) -> Result<Respo

    let web_res = client.request(req).await?;
    // Compare the JSON we sent (before) with what we received (after):
    let body = Body::wrap_stream(web_res.into_body().map_ok(|b| {
        format!(
            "<b>POST request body</b>: {}<br><b>Response</b>: {}",
    let before = stream::once(async {
        Ok(format!(
            "<b>POST request body</b>: {}<br><b>Response</b>: ",
            POST_DATA,
            std::str::from_utf8(&b).unwrap()
        )
    }));
        .into())
    });
    let after = web_res.into_body();
    let body = Body::wrap_stream(before.chain(after));

    Ok(Response::new(body))
}

async fn api_post_response(mut req: Request<Body>) -> Result<Response<Body>> {
    // Concatenate the body...
    let mut whole_body = Vec::new();
    while let Some(chunk) = req.body_mut().next().await {
        whole_body.extend_from_slice(&chunk?);
    }
async fn api_post_response(req: Request<Body>) -> Result<Response<Body>> {
    // Aggregate the body...
    let whole_body = hyper::body::aggregate(req.into_body()).await?;
    // Decode as JSON...
    let mut data: serde_json::Value = serde_json::from_slice(&whole_body)?;
    let mut data: serde_json::Value = serde_json::from_reader(whole_body.reader())?;
    // Change the JSON...
    data["test"] = serde_json::Value::from("test_value");
    // And respond with the new JSON.
src/body/aggregate.rs (new file, 25 lines)
@@ -0,0 +1,25 @@
use bytes::Buf;

use super::HttpBody;
use crate::common::buf::BufList;

/// Aggregate the data buffers from a body asynchronously.
///
/// The returned `impl Buf` groups the `Buf`s from the `HttpBody` without
/// copying them. This is ideal if you don't require a contiguous buffer.
pub async fn aggregate<T>(body: T) -> Result<impl Buf, T::Error>
where
    T: HttpBody,
{
    let mut bufs = BufList::new();

    futures_util::pin_mut!(body);
    while let Some(buf) = body.data().await {
        let buf = buf?;
        if buf.has_remaining() {
            bufs.push(buf);
        }
    }

    Ok(bufs)
}
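For consumers that keep the data non-contiguous, here is a sketch of draining the aggregated `impl Buf` chunk by chunk using only the `bytes` 0.5 `Buf` trait methods; the `write_all_buf` helper below is hypothetical and not part of hyper:

```rust
use bytes::Buf;
use tokio::io::{AsyncWrite, AsyncWriteExt};

// Hypothetical helper: stream an aggregated body to any writer chunk by
// chunk, without first flattening it into one contiguous allocation.
async fn write_all_buf<W>(mut buf: impl Buf, writer: &mut W) -> std::io::Result<()>
where
    W: AsyncWrite + Unpin,
{
    while buf.has_remaining() {
        // `bytes()` exposes the front of the current chunk; `advance()` moves past it.
        let chunk = buf.bytes();
        let n = chunk.len();
        writer.write_all(chunk).await?;
        buf.advance(n);
    }
    Ok(())
}
```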
@@ -18,11 +18,16 @@
pub use bytes::{Buf, Bytes};
pub use http_body::Body as HttpBody;

pub use self::aggregate::aggregate;
pub use self::body::{Body, Sender};
pub use self::to_bytes::to_bytes;

pub(crate) use self::payload::Payload;

mod aggregate;
mod body;
mod payload;
mod to_bytes;

/// An optimization to try to take a full body if immediately available.
///
src/body/to_bytes.rs (new file, 36 lines)
@@ -0,0 +1,36 @@
use bytes::{Buf, BufMut, Bytes};

use super::HttpBody;

/// Concatenate the buffers from a body into a single `Bytes` asynchronously.
///
/// This may require copying the data into a single contiguous buffer.
pub async fn to_bytes<T>(body: T) -> Result<Bytes, T::Error>
where
    T: HttpBody,
{
    futures_util::pin_mut!(body);

    // If there's only 1 chunk, we can just return Buf::to_bytes()
    let mut first = if let Some(buf) = body.data().await {
        buf?
    } else {
        return Ok(Bytes::new());
    };

    let second = if let Some(buf) = body.data().await {
        buf?
    } else {
        return Ok(first.to_bytes());
    };

    // With more than 1 buf, we gotta flatten into a Vec first.
    let cap = first.remaining() + second.remaining() + body.size_hint().lower() as usize;
    let mut vec = Vec::with_capacity(cap);
    vec.put(first);
    vec.put(second);

    while let Some(buf) = body.data().await {
        vec.put(buf?);
    }

    Ok(vec.into())
}
src/common/buf.rs (new file, 75 lines)
@@ -0,0 +1,75 @@
use std::collections::VecDeque;
use std::io::IoSlice;

use bytes::Buf;

pub(crate) struct BufList<T> {
    bufs: VecDeque<T>,
}

impl<T: Buf> BufList<T> {
    pub(crate) fn new() -> BufList<T> {
        BufList {
            bufs: VecDeque::new(),
        }
    }

    #[inline]
    pub(crate) fn push(&mut self, buf: T) {
        debug_assert!(buf.has_remaining());
        self.bufs.push_back(buf);
    }

    #[inline]
    pub(crate) fn bufs_cnt(&self) -> usize {
        self.bufs.len()
    }
}

impl<T: Buf> Buf for BufList<T> {
    #[inline]
    fn remaining(&self) -> usize {
        self.bufs.iter().map(|buf| buf.remaining()).sum()
    }

    #[inline]
    fn bytes(&self) -> &[u8] {
        for buf in &self.bufs {
            return buf.bytes();
        }
        &[]
    }

    #[inline]
    fn advance(&mut self, mut cnt: usize) {
        while cnt > 0 {
            {
                let front = &mut self.bufs[0];
                let rem = front.remaining();
                if rem > cnt {
                    front.advance(cnt);
                    return;
                } else {
                    front.advance(rem);
                    cnt -= rem;
                }
            }
            self.bufs.pop_front();
        }
    }

    #[inline]
    fn bytes_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize {
        if dst.is_empty() {
            return 0;
        }
        let mut vecs = 0;
        for buf in &self.bufs {
            vecs += buf.bytes_vectored(&mut dst[vecs..]);
            if vecs == dst.len() {
                break;
            }
        }
        vecs
    }
}
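A hypothetical check of the semantics above (not in the commit): two pushed buffers act as one `Buf`, and `advance` crosses the boundary between them. Since `BufList` is crate-private, such a test would have to live next to it in `src/common/buf.rs`:

```rust
#[cfg(test)]
mod buf_list_sketch {
    use bytes::Buf;
    use super::BufList; // hypothetical placement inside src/common/buf.rs

    #[test]
    fn advance_crosses_buffer_boundaries() {
        let mut list = BufList::new();
        // `&[u8]` implements `Buf`, so static slices make convenient test inputs.
        list.push(&b"hello "[..]);
        list.push(&b"world"[..]);

        assert_eq!(list.remaining(), 11);
        assert_eq!(list.bufs_cnt(), 2);

        // Skip past the whole first buffer and one byte of the second.
        list.advance(7);
        assert_eq!(list.remaining(), 4);
        assert_eq!(list.bytes(), b"orld");
    }
}
```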
@@ -7,6 +7,7 @@ macro_rules! ready {
    };
}

pub(crate) mod buf;
pub(crate) mod drain;
pub(crate) mod exec;
pub(crate) mod io;
@@ -1,6 +1,5 @@
use std::cell::Cell;
use std::cmp;
use std::collections::VecDeque;
use std::fmt;
use std::io::{self, IoSlice};

@@ -8,6 +7,7 @@ use bytes::{Buf, BufMut, Bytes, BytesMut};
use tokio::io::{AsyncRead, AsyncWrite};

use super::{Http1Transaction, ParseContext, ParsedMessage};
use crate::common::buf::BufList;
use crate::common::{task, Pin, Poll, Unpin};

/// The initial buffer size allocated before trying to read from IO.
@@ -90,7 +90,7 @@ where
    pub fn set_write_strategy_flatten(&mut self) {
        // this should always be called only at construction time,
        // so this assert is here to catch myself
        debug_assert!(self.write_buf.queue.bufs.is_empty());
        debug_assert!(self.write_buf.queue.bufs_cnt() == 0);
        self.write_buf.set_strategy(WriteStrategy::Flatten);
    }

@@ -431,16 +431,16 @@ pub(super) struct WriteBuf<B> {
    headers: Cursor<Vec<u8>>,
    max_buf_size: usize,
    /// Deque of user buffers if strategy is Queue
    queue: BufDeque<B>,
    queue: BufList<B>,
    strategy: WriteStrategy,
}

impl<B> WriteBuf<B> {
impl<B: Buf> WriteBuf<B> {
    fn new() -> WriteBuf<B> {
        WriteBuf {
            headers: Cursor::new(Vec::with_capacity(INIT_BUFFER_SIZE)),
            max_buf_size: DEFAULT_MAX_BUFFER_SIZE,
            queue: BufDeque::new(),
            queue: BufList::new(),
            strategy: WriteStrategy::Auto,
        }
    }
@@ -479,7 +479,7 @@ where
            }
        }
        WriteStrategy::Auto | WriteStrategy::Queue => {
            self.queue.bufs.push_back(buf.into());
            self.queue.push(buf.into());
        }
    }
}
@@ -488,7 +488,7 @@ where
    match self.strategy {
        WriteStrategy::Flatten => self.remaining() < self.max_buf_size,
        WriteStrategy::Auto | WriteStrategy::Queue => {
            self.queue.bufs.len() < MAX_BUF_LIST_BUFFERS && self.remaining() < self.max_buf_size
            self.queue.bufs_cnt() < MAX_BUF_LIST_BUFFERS && self.remaining() < self.max_buf_size
        }
    }
}
@@ -608,66 +608,6 @@ enum WriteStrategy {
    Queue,
}

struct BufDeque<T> {
    bufs: VecDeque<T>,
}

impl<T> BufDeque<T> {
    fn new() -> BufDeque<T> {
        BufDeque {
            bufs: VecDeque::new(),
        }
    }
}

impl<T: Buf> Buf for BufDeque<T> {
    #[inline]
    fn remaining(&self) -> usize {
        self.bufs.iter().map(|buf| buf.remaining()).sum()
    }

    #[inline]
    fn bytes(&self) -> &[u8] {
        for buf in &self.bufs {
            return buf.bytes();
        }
        &[]
    }

    #[inline]
    fn advance(&mut self, mut cnt: usize) {
        while cnt > 0 {
            {
                let front = &mut self.bufs[0];
                let rem = front.remaining();
                if rem > cnt {
                    front.advance(cnt);
                    return;
                } else {
                    front.advance(rem);
                    cnt -= rem;
                }
            }
            self.bufs.pop_front();
        }
    }

    #[inline]
    fn bytes_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize {
        if dst.is_empty() {
            return 0;
        }
        let mut vecs = 0;
        for buf in &self.bufs {
            vecs += buf.bytes_vectored(&mut dst[vecs..]);
            if vecs == dst.len() {
                break;
            }
        }
        vecs
    }
}

#[cfg(test)]
mod tests {
    use super::*;
@@ -871,12 +811,12 @@ mod tests {
        buffered.buffer(Cursor::new(b"world, ".to_vec()));
        buffered.buffer(Cursor::new(b"it's ".to_vec()));
        buffered.buffer(Cursor::new(b"hyper!".to_vec()));
        assert_eq!(buffered.write_buf.queue.bufs.len(), 3);
        assert_eq!(buffered.write_buf.queue.bufs_cnt(), 3);
        buffered.flush().unwrap();

        assert_eq!(buffered.io, b"hello world, it's hyper!");
        assert_eq!(buffered.io.num_writes(), 1);
        assert_eq!(buffered.write_buf.queue.bufs.len(), 0);
        assert_eq!(buffered.write_buf.queue.bufs_cnt(), 0);
    }
    */

@@ -896,7 +836,7 @@ mod tests {
        buffered.buffer(Cursor::new(b"world, ".to_vec()));
        buffered.buffer(Cursor::new(b"it's ".to_vec()));
        buffered.buffer(Cursor::new(b"hyper!".to_vec()));
        assert_eq!(buffered.write_buf.queue.bufs.len(), 0);
        assert_eq!(buffered.write_buf.queue.bufs_cnt(), 0);

        buffered.flush().await.expect("flush");
    }
@@ -921,11 +861,11 @@ mod tests {
        buffered.buffer(Cursor::new(b"world, ".to_vec()));
        buffered.buffer(Cursor::new(b"it's ".to_vec()));
        buffered.buffer(Cursor::new(b"hyper!".to_vec()));
        assert_eq!(buffered.write_buf.queue.bufs.len(), 3);
        assert_eq!(buffered.write_buf.queue.bufs_cnt(), 3);

        buffered.flush().await.expect("flush");

        assert_eq!(buffered.write_buf.queue.bufs.len(), 0);
        assert_eq!(buffered.write_buf.queue.bufs_cnt(), 0);
    }

    #[tokio::test]
@@ -949,11 +889,11 @@ mod tests {
        buffered.buffer(Cursor::new(b"world, ".to_vec()));
        buffered.buffer(Cursor::new(b"it's ".to_vec()));
        buffered.buffer(Cursor::new(b"hyper!".to_vec()));
        assert_eq!(buffered.write_buf.queue.bufs.len(), 3);
        assert_eq!(buffered.write_buf.queue.bufs_cnt(), 3);

        buffered.flush().await.expect("flush");

        assert_eq!(buffered.write_buf.queue.bufs.len(), 0);
        assert_eq!(buffered.write_buf.queue.bufs_cnt(), 0);
    }

    #[cfg(feature = "nightly")]
@@ -11,12 +11,12 @@ use std::task::{Context, Poll};
use std::thread;
use std::time::Duration;

use hyper::body::to_bytes as concat;
use hyper::{Body, Client, Method, Request, StatusCode};

use futures_channel::oneshot;
use futures_core::{Future, Stream, TryFuture};
use futures_util::future::{self, FutureExt, TryFutureExt};
use futures_util::StreamExt;
use tokio::net::TcpStream;
use tokio::runtime::Runtime;

@@ -28,14 +28,6 @@ fn tcp_connect(addr: &SocketAddr) -> impl Future<Output = std::io::Result<TcpStr
    TcpStream::connect(*addr)
}

async fn concat(mut body: Body) -> Result<bytes::Bytes, hyper::Error> {
    let mut vec = Vec::new();
    while let Some(chunk) = body.next().await {
        vec.extend_from_slice(&chunk?);
    }
    Ok(vec.into())
}

macro_rules! test {
    (
        name: $name:ident,

@@ -355,7 +355,7 @@ async fn async_test(cfg: __TestConfig) {
            func(&req.headers());
        }
        let sbody = sreq.body;
        concat(req.into_body()).map_ok(move |body| {
        hyper::body::to_bytes(req.into_body()).map_ok(move |body| {
            assert_eq!(body.as_ref(), sbody.as_slice(), "client body");

            let mut res = Response::builder()
@@ -410,7 +410,7 @@ async fn async_test(cfg: __TestConfig) {
            for func in &cheaders {
                func(&res.headers());
            }
            concat(res.into_body())
            hyper::body::to_bytes(res.into_body())
        })
        .map_ok(move |body| {
            assert_eq!(body.as_ref(), cbody.as_slice(), "server body");
@@ -473,11 +473,3 @@ fn naive_proxy(cfg: ProxyConfig) -> (SocketAddr, impl Future<Output = ()>) {
    let proxy_addr = srv.local_addr();
    (proxy_addr, srv.map(|res| res.expect("proxy error")))
}

async fn concat(mut body: Body) -> Result<bytes::Bytes, hyper::Error> {
    let mut vec = Vec::new();
    while let Some(chunk) = body.next().await {
        vec.extend_from_slice(&chunk?);
    }
    Ok(vec.into())
}