feat(body): add body::aggregate and body::to_bytes functions

Adds utility functions to `hyper::body` that asynchronously
collect all the buffers of an `HttpBody` into one.

- `aggregate` will collect all the buffers into an `impl Buf` without
  copying the contents. This is ideal if you don't need a contiguous buffer.
- `to_bytes` will copy all the data into a single contiguous `Bytes`
  buffer. A short usage sketch of both follows below.
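
A rough usage sketch (hyper 0.13-style client code; the URL and the client
setup below are placeholders for illustration, not part of this change):

    use hyper::body::Buf; // re-exported `bytes::Buf`, for `remaining()`
    use hyper::Client;

    async fn run() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        let client = Client::new();

        // `aggregate`: no copying; read through the `Buf` interface.
        let res = client.get("http://example.com".parse()?).await?;
        let buf = hyper::body::aggregate(res.into_body()).await?;
        println!("aggregated {} bytes", buf.remaining());

        // `to_bytes`: one contiguous `Bytes`, usable as a `&[u8]`.
        let res = client.get("http://example.com".parse()?).await?;
        let bytes = hyper::body::to_bytes(res.into_body()).await?;
        println!("first byte: {:?}", bytes.first());

        Ok(())
    }
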
Author: Sean McArthur
Date:   2019-12-05 17:51:37 -08:00
Parent: 5a59875742
Commit: 8ba9a8d2c4

15 changed files with 282 additions and 128 deletions

Cargo.toml

@@ -98,7 +98,7 @@ required-features = ["runtime"]
 [[example]]
 name = "client_json"
 path = "examples/client_json.rs"
-required-features = ["runtime", "stream"]
+required-features = ["runtime"]
 [[example]]
 name = "echo"
@@ -162,6 +162,11 @@ path = "examples/web_api.rs"
 required-features = ["runtime", "stream"]
+[[bench]]
+name = "body"
+path = "benches/body.rs"
+required-features = ["runtime", "stream"]
 [[bench]]
 name = "connect"
 path = "benches/connect.rs"

benches/body.rs (new file, 89 lines)

@@ -0,0 +1,89 @@
#![feature(test)]
#![deny(warnings)]
extern crate test;
use bytes::Buf;
use futures_util::stream;
use futures_util::StreamExt;
use hyper::body::Body;
macro_rules! bench_stream {
($bencher:ident, bytes: $bytes:expr, count: $count:expr, $total_ident:ident, $body_pat:pat, $block:expr) => {{
let mut rt = tokio::runtime::Builder::new()
.basic_scheduler()
.build()
.expect("rt build");
let $total_ident: usize = $bytes * $count;
$bencher.bytes = $total_ident as u64;
let __s: &'static [&'static [u8]] = &[&[b'x'; $bytes] as &[u8]; $count] as _;
$bencher.iter(|| {
rt.block_on(async {
let $body_pat = Body::wrap_stream(
stream::iter(__s.iter()).map(|&s| Ok::<_, std::convert::Infallible>(s)),
);
$block;
});
});
}};
}
macro_rules! benches {
($($name:ident, $bytes:expr, $count:expr;)+) => (
mod aggregate {
use super::*;
$(
#[bench]
fn $name(b: &mut test::Bencher) {
bench_stream!(b, bytes: $bytes, count: $count, total, body, {
let buf = hyper::body::aggregate(body).await.unwrap();
assert_eq!(buf.remaining(), total);
});
}
)+
}
mod manual_into_vec {
use super::*;
$(
#[bench]
fn $name(b: &mut test::Bencher) {
bench_stream!(b, bytes: $bytes, count: $count, total, mut body, {
let mut vec = Vec::new();
while let Some(chunk) = body.next().await {
vec.extend_from_slice(&chunk.unwrap());
}
assert_eq!(vec.len(), total);
});
}
)+
}
mod to_bytes {
use super::*;
$(
#[bench]
fn $name(b: &mut test::Bencher) {
bench_stream!(b, bytes: $bytes, count: $count, total, body, {
let bytes = hyper::body::to_bytes(body).await.unwrap();
assert_eq!(bytes.len(), total);
});
}
)+
}
)
}
// ===== Actual Benchmarks =====
benches! {
bytes_1_000_count_2, 1_000, 2;
bytes_1_000_count_10, 1_000, 10;
bytes_10_000_count_1, 10_000, 1;
bytes_10_000_count_10, 10_000, 10;
}

examples/client.rs

@@ -40,6 +40,8 @@ async fn fetch_url(url: hyper::Uri) -> Result<()> {
     println!("Response: {}", res.status());
     println!("Headers: {:#?}\n", res.headers());
 
+    // Stream the body, writing each chunk to stdout as we get it
+    // (instead of buffering and printing at the end).
     while let Some(next) = res.body_mut().data().await {
         let chunk = next?;
         io::stdout().write_all(&chunk).await?;

examples/client_json.rs

@@ -4,7 +4,7 @@
 #[macro_use]
 extern crate serde_derive;
-use futures_util::StreamExt;
+use bytes::buf::BufExt as _;
 use hyper::Client;
 // A simple type alias so as to DRY.
@@ -27,14 +27,13 @@ async fn fetch_json(url: hyper::Uri) -> Result<Vec<User>> {
     let client = Client::new();
     // Fetch the url...
-    let mut res = client.get(url).await?;
+    let res = client.get(url).await?;
-    // asynchronously concatenate chunks of the body
-    let mut body = Vec::new();
-    while let Some(chunk) = res.body_mut().next().await {
-        body.extend_from_slice(&chunk?);
-    }
+    // asynchronously aggregate the chunks of the body
+    let body = hyper::body::aggregate(res.into_body()).await?;
     // try to parse as json with serde_json
-    let users = serde_json::from_slice(&body)?;
+    let users = serde_json::from_reader(body.reader())?;
     Ok(users)
 }

examples/echo.rs

@@ -1,12 +1,12 @@
-//#![deny(warnings)]
+#![deny(warnings)]
-use futures_util::{StreamExt, TryStreamExt};
+use futures_util::TryStreamExt;
 use hyper::service::{make_service_fn, service_fn};
 use hyper::{Body, Method, Request, Response, Server, StatusCode};
 /// This is our service handler. It receives a Request, routes on its
 /// path, and returns a Future of a Response.
-async fn echo(mut req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
+async fn echo(req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
     match (req.method(), req.uri().path()) {
         // Serve some instructions at /
         (&Method::GET, "/") => Ok(Response::new(Body::from(
@@ -34,10 +34,7 @@ async fn echo(req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
         // So here we do `.await` on the future, waiting on concatenating the full body,
         // then afterwards the content can be reversed. Only then can we return a `Response`.
         (&Method::POST, "/echo/reversed") => {
-            let mut whole_body = Vec::new();
-            while let Some(chunk) = req.body_mut().next().await {
-                whole_body.extend_from_slice(&chunk?);
-            }
+            let whole_body = hyper::body::to_bytes(req.into_body()).await?;
             let reversed_body = whole_body.iter().rev().cloned().collect::<Vec<u8>>();
             Ok(Response::new(Body::from(reversed_body)))

examples/params.rs

@@ -4,7 +4,6 @@
 use hyper::service::{make_service_fn, service_fn};
 use hyper::{Body, Method, Request, Response, Server, StatusCode};
-use futures_util::StreamExt;
 use std::collections::HashMap;
 use url::form_urlencoded;
@@ -13,15 +12,12 @@ static MISSING: &[u8] = b"Missing field";
 static NOTNUMERIC: &[u8] = b"Number field is not numeric";
 // Using service_fn, we can turn this function into a `Service`.
-async fn param_example(mut req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
+async fn param_example(req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
     match (req.method(), req.uri().path()) {
         (&Method::GET, "/") | (&Method::GET, "/post") => Ok(Response::new(INDEX.into())),
         (&Method::POST, "/post") => {
             // Concatenate the body...
-            let mut b = Vec::new();
-            while let Some(chunk) = req.body_mut().next().await {
-                b.extend_from_slice(&chunk?);
-            }
+            let b = hyper::body::to_bytes(req.into_body()).await?;
             // Parse the request body. form_urlencoded::parse
             // always succeeds, but in general parsing may
             // fail (for example, an invalid post of json), so

examples/web_api.rs

@@ -1,6 +1,7 @@
 #![deny(warnings)]
-use futures_util::{StreamExt, TryStreamExt};
+use bytes::buf::BufExt;
+use futures_util::{stream, StreamExt};
 use hyper::client::HttpConnector;
 use hyper::service::{make_service_fn, service_fn};
 use hyper::{header, Body, Client, Method, Request, Response, Server, StatusCode};
@@ -24,25 +25,24 @@ async fn client_request_response(client: &Client<HttpConnector>) -> Result<Respo
     let web_res = client.request(req).await?;
     // Compare the JSON we sent (before) with what we received (after):
-    let body = Body::wrap_stream(web_res.into_body().map_ok(|b| {
-        format!(
-            "<b>POST request body</b>: {}<br><b>Response</b>: {}",
+    let before = stream::once(async {
+        Ok(format!(
+            "<b>POST request body</b>: {}<br><b>Response</b>: ",
             POST_DATA,
-            std::str::from_utf8(&b).unwrap()
         )
-    }));
+        .into())
+    });
+    let after = web_res.into_body();
+    let body = Body::wrap_stream(before.chain(after));
     Ok(Response::new(body))
 }
-async fn api_post_response(mut req: Request<Body>) -> Result<Response<Body>> {
+async fn api_post_response(req: Request<Body>) -> Result<Response<Body>> {
-    // Concatenate the body...
-    let mut whole_body = Vec::new();
-    while let Some(chunk) = req.body_mut().next().await {
-        whole_body.extend_from_slice(&chunk?);
-    }
+    // Aggregate the body...
+    let whole_body = hyper::body::aggregate(req.into_body()).await?;
     // Decode as JSON...
-    let mut data: serde_json::Value = serde_json::from_slice(&whole_body)?;
+    let mut data: serde_json::Value = serde_json::from_reader(whole_body.reader())?;
     // Change the JSON...
     data["test"] = serde_json::Value::from("test_value");
     // And respond with the new JSON.

src/body/aggregate.rs (new file, 25 lines)

@@ -0,0 +1,25 @@
use bytes::Buf;
use super::HttpBody;
use crate::common::buf::BufList;
/// Aggregate the data buffers from a body asynchronously.
///
/// The returned `impl Buf` groups the `Buf`s from the `HttpBody` without
/// copying them. This is ideal if you don't require a contiguous buffer.
pub async fn aggregate<T>(body: T) -> Result<impl Buf, T::Error>
where
T: HttpBody,
{
let mut bufs = BufList::new();
futures_util::pin_mut!(body);
while let Some(buf) = body.data().await {
let buf = buf?;
if buf.has_remaining() {
bufs.push(buf);
}
}
Ok(bufs)
}
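
Since the aggregated buffer is not contiguous, it is consumed through the
`Buf` interface rather than as a slice; the updated `client_json` and
`web_api` examples pair it with `bytes::buf::BufExt::reader` and
`serde_json::from_reader`. A minimal sketch of that pattern (the `Person`
type and the serde `derive` feature are assumed for illustration):

    use bytes::buf::BufExt as _; // provides `.reader()` on any `Buf` (bytes 0.5)
    use serde::Deserialize;

    #[derive(Deserialize)]
    struct Person {
        name: String,
    }

    // Illustrative only: decode a JSON response body without flattening it first.
    async fn decode_person(
        res: hyper::Response<hyper::Body>,
    ) -> Result<Person, Box<dyn std::error::Error + Send + Sync>> {
        let body = hyper::body::aggregate(res.into_body()).await?;
        let person: Person = serde_json::from_reader(body.reader())?;
        Ok(person)
    }
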

src/body/mod.rs

@@ -18,11 +18,16 @@
 pub use bytes::{Buf, Bytes};
 pub use http_body::Body as HttpBody;
+pub use self::aggregate::aggregate;
 pub use self::body::{Body, Sender};
+pub use self::to_bytes::to_bytes;
 pub(crate) use self::payload::Payload;
+mod aggregate;
 mod body;
 mod payload;
+mod to_bytes;
 /// An optimization to try to take a full body if immediately available.
 ///

src/body/to_bytes.rs (new file, 36 lines)

@@ -0,0 +1,36 @@
use bytes::{Buf, BufMut, Bytes};
use super::HttpBody;
/// Concatenate the buffers from a body into a single `Bytes` asynchronously.
pub async fn to_bytes<T>(body: T) -> Result<Bytes, T::Error>
where
T: HttpBody,
{
futures_util::pin_mut!(body);
// If there's only 1 chunk, we can just return Buf::to_bytes()
let mut first = if let Some(buf) = body.data().await {
buf?
} else {
return Ok(Bytes::new());
};
let second = if let Some(buf) = body.data().await {
buf?
} else {
return Ok(first.to_bytes());
};
// With more than 1 buf, we gotta flatten into a Vec first.
let cap = first.remaining() + second.remaining() + body.size_hint().lower() as usize;
let mut vec = Vec::with_capacity(cap);
vec.put(first);
vec.put(second);
while let Some(buf) = body.data().await {
vec.put(buf?);
}
Ok(vec.into())
}

src/common/buf.rs (new file, 75 lines)

@@ -0,0 +1,75 @@
use std::collections::VecDeque;
use std::io::IoSlice;
use bytes::Buf;
pub(crate) struct BufList<T> {
bufs: VecDeque<T>,
}
impl<T: Buf> BufList<T> {
pub(crate) fn new() -> BufList<T> {
BufList {
bufs: VecDeque::new(),
}
}
#[inline]
pub(crate) fn push(&mut self, buf: T) {
debug_assert!(buf.has_remaining());
self.bufs.push_back(buf);
}
#[inline]
pub(crate) fn bufs_cnt(&self) -> usize {
self.bufs.len()
}
}
impl<T: Buf> Buf for BufList<T> {
#[inline]
fn remaining(&self) -> usize {
self.bufs.iter().map(|buf| buf.remaining()).sum()
}
#[inline]
fn bytes(&self) -> &[u8] {
for buf in &self.bufs {
return buf.bytes();
}
&[]
}
#[inline]
fn advance(&mut self, mut cnt: usize) {
while cnt > 0 {
{
let front = &mut self.bufs[0];
let rem = front.remaining();
if rem > cnt {
front.advance(cnt);
return;
} else {
front.advance(rem);
cnt -= rem;
}
}
self.bufs.pop_front();
}
}
#[inline]
fn bytes_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize {
if dst.is_empty() {
return 0;
}
let mut vecs = 0;
for buf in &self.bufs {
vecs += buf.bytes_vectored(&mut dst[vecs..]);
if vecs == dst.len() {
break;
}
}
vecs
}
}

src/common/mod.rs

@@ -7,6 +7,7 @@ macro_rules! ready {
     };
 }
+pub(crate) mod buf;
 pub(crate) mod drain;
 pub(crate) mod exec;
 pub(crate) mod io;

src/proto/h1/io.rs

@@ -1,6 +1,5 @@
 use std::cell::Cell;
 use std::cmp;
-use std::collections::VecDeque;
 use std::fmt;
 use std::io::{self, IoSlice};
@@ -8,6 +7,7 @@ use bytes::{Buf, BufMut, Bytes, BytesMut};
 use tokio::io::{AsyncRead, AsyncWrite};
 use super::{Http1Transaction, ParseContext, ParsedMessage};
+use crate::common::buf::BufList;
 use crate::common::{task, Pin, Poll, Unpin};
 /// The initial buffer size allocated before trying to read from IO.
@@ -90,7 +90,7 @@ where
     pub fn set_write_strategy_flatten(&mut self) {
         // this should always be called only at construction time,
         // so this assert is here to catch myself
-        debug_assert!(self.write_buf.queue.bufs.is_empty());
+        debug_assert!(self.write_buf.queue.bufs_cnt() == 0);
         self.write_buf.set_strategy(WriteStrategy::Flatten);
     }
@@ -431,16 +431,16 @@ pub(super) struct WriteBuf<B> {
     headers: Cursor<Vec<u8>>,
     max_buf_size: usize,
     /// Deque of user buffers if strategy is Queue
-    queue: BufDeque<B>,
+    queue: BufList<B>,
     strategy: WriteStrategy,
 }
-impl<B> WriteBuf<B> {
+impl<B: Buf> WriteBuf<B> {
     fn new() -> WriteBuf<B> {
         WriteBuf {
             headers: Cursor::new(Vec::with_capacity(INIT_BUFFER_SIZE)),
             max_buf_size: DEFAULT_MAX_BUFFER_SIZE,
-            queue: BufDeque::new(),
+            queue: BufList::new(),
             strategy: WriteStrategy::Auto,
         }
     }
@@ -479,7 +479,7 @@ where
                 }
             }
             WriteStrategy::Auto | WriteStrategy::Queue => {
-                self.queue.bufs.push_back(buf.into());
+                self.queue.push(buf.into());
             }
         }
     }
@@ -488,7 +488,7 @@ where
         match self.strategy {
             WriteStrategy::Flatten => self.remaining() < self.max_buf_size,
             WriteStrategy::Auto | WriteStrategy::Queue => {
-                self.queue.bufs.len() < MAX_BUF_LIST_BUFFERS && self.remaining() < self.max_buf_size
+                self.queue.bufs_cnt() < MAX_BUF_LIST_BUFFERS && self.remaining() < self.max_buf_size
             }
         }
     }
@@ -608,66 +608,6 @@ enum WriteStrategy {
     Queue,
 }
-struct BufDeque<T> {
-    bufs: VecDeque<T>,
-}
-impl<T> BufDeque<T> {
-    fn new() -> BufDeque<T> {
-        BufDeque {
-            bufs: VecDeque::new(),
-        }
-    }
-}
-impl<T: Buf> Buf for BufDeque<T> {
-    #[inline]
-    fn remaining(&self) -> usize {
-        self.bufs.iter().map(|buf| buf.remaining()).sum()
-    }
-    #[inline]
-    fn bytes(&self) -> &[u8] {
-        for buf in &self.bufs {
-            return buf.bytes();
-        }
-        &[]
-    }
-    #[inline]
-    fn advance(&mut self, mut cnt: usize) {
-        while cnt > 0 {
-            {
-                let front = &mut self.bufs[0];
-                let rem = front.remaining();
-                if rem > cnt {
-                    front.advance(cnt);
-                    return;
-                } else {
-                    front.advance(rem);
-                    cnt -= rem;
-                }
-            }
-            self.bufs.pop_front();
-        }
-    }
-    #[inline]
-    fn bytes_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize {
-        if dst.is_empty() {
-            return 0;
-        }
-        let mut vecs = 0;
-        for buf in &self.bufs {
-            vecs += buf.bytes_vectored(&mut dst[vecs..]);
-            if vecs == dst.len() {
-                break;
-            }
-        }
-        vecs
-    }
-}
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -871,12 +811,12 @@ mod tests {
         buffered.buffer(Cursor::new(b"world, ".to_vec()));
         buffered.buffer(Cursor::new(b"it's ".to_vec()));
         buffered.buffer(Cursor::new(b"hyper!".to_vec()));
-        assert_eq!(buffered.write_buf.queue.bufs.len(), 3);
+        assert_eq!(buffered.write_buf.queue.bufs_cnt(), 3);
         buffered.flush().unwrap();
         assert_eq!(buffered.io, b"hello world, it's hyper!");
         assert_eq!(buffered.io.num_writes(), 1);
-        assert_eq!(buffered.write_buf.queue.bufs.len(), 0);
+        assert_eq!(buffered.write_buf.queue.bufs_cnt(), 0);
     }
     */
@@ -896,7 +836,7 @@ mod tests {
         buffered.buffer(Cursor::new(b"world, ".to_vec()));
         buffered.buffer(Cursor::new(b"it's ".to_vec()));
         buffered.buffer(Cursor::new(b"hyper!".to_vec()));
-        assert_eq!(buffered.write_buf.queue.bufs.len(), 0);
+        assert_eq!(buffered.write_buf.queue.bufs_cnt(), 0);
         buffered.flush().await.expect("flush");
     }
@@ -921,11 +861,11 @@ mod tests {
         buffered.buffer(Cursor::new(b"world, ".to_vec()));
         buffered.buffer(Cursor::new(b"it's ".to_vec()));
         buffered.buffer(Cursor::new(b"hyper!".to_vec()));
-        assert_eq!(buffered.write_buf.queue.bufs.len(), 3);
+        assert_eq!(buffered.write_buf.queue.bufs_cnt(), 3);
         buffered.flush().await.expect("flush");
-        assert_eq!(buffered.write_buf.queue.bufs.len(), 0);
+        assert_eq!(buffered.write_buf.queue.bufs_cnt(), 0);
     }
     #[tokio::test]
@@ -949,11 +889,11 @@ mod tests {
         buffered.buffer(Cursor::new(b"world, ".to_vec()));
         buffered.buffer(Cursor::new(b"it's ".to_vec()));
         buffered.buffer(Cursor::new(b"hyper!".to_vec()));
-        assert_eq!(buffered.write_buf.queue.bufs.len(), 3);
+        assert_eq!(buffered.write_buf.queue.bufs_cnt(), 3);
         buffered.flush().await.expect("flush");
-        assert_eq!(buffered.write_buf.queue.bufs.len(), 0);
+        assert_eq!(buffered.write_buf.queue.bufs_cnt(), 0);
     }
     #[cfg(feature = "nightly")]

tests/client.rs

@@ -11,12 +11,12 @@ use std::task::{Context, Poll};
 use std::thread;
 use std::time::Duration;
+use hyper::body::to_bytes as concat;
 use hyper::{Body, Client, Method, Request, StatusCode};
 use futures_channel::oneshot;
 use futures_core::{Future, Stream, TryFuture};
 use futures_util::future::{self, FutureExt, TryFutureExt};
-use futures_util::StreamExt;
 use tokio::net::TcpStream;
 use tokio::runtime::Runtime;
@@ -28,14 +28,6 @@ fn tcp_connect(addr: &SocketAddr) -> impl Future<Output = std::io::Result<TcpStr
     TcpStream::connect(*addr)
 }
-async fn concat(mut body: Body) -> Result<bytes::Bytes, hyper::Error> {
-    let mut vec = Vec::new();
-    while let Some(chunk) = body.next().await {
-        vec.extend_from_slice(&chunk?);
-    }
-    Ok(vec.into())
-}
 macro_rules! test {
     (
         name: $name:ident,

tests/support/mod.rs

@@ -355,7 +355,7 @@ async fn async_test(cfg: __TestConfig) {
                 func(&req.headers());
             }
             let sbody = sreq.body;
-            concat(req.into_body()).map_ok(move |body| {
+            hyper::body::to_bytes(req.into_body()).map_ok(move |body| {
                 assert_eq!(body.as_ref(), sbody.as_slice(), "client body");
                 let mut res = Response::builder()
@@ -410,7 +410,7 @@ async fn async_test(cfg: __TestConfig) {
             for func in &cheaders {
                 func(&res.headers());
             }
-            concat(res.into_body())
+            hyper::body::to_bytes(res.into_body())
         })
         .map_ok(move |body| {
             assert_eq!(body.as_ref(), cbody.as_slice(), "server body");
@@ -473,11 +473,3 @@ fn naive_proxy(cfg: ProxyConfig) -> (SocketAddr, impl Future<Output = ()>) {
     let proxy_addr = srv.local_addr();
     (proxy_addr, srv.map(|res| res.expect("proxy error")))
 }
-async fn concat(mut body: Body) -> Result<bytes::Bytes, hyper::Error> {
-    let mut vec = Vec::new();
-    while let Some(chunk) = body.next().await {
-        vec.extend_from_slice(&chunk?);
-    }
-    Ok(vec.into())
-}