test(benches): re-enable pipeline and server bench (#2934)

Re-enable the recently disabled pipeline and server benches using
`hyper::server::conn` instead of the removed higher-level `Server` API.
Oddbjørn Grødem
2022-08-02 15:46:26 +02:00
committed by GitHub
parent 3c7bef3b6f
commit c558647762
2 changed files with 181 additions and 177 deletions
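
For context, the sketch below shows roughly the shape of the server side the benches switch to. It is illustrative, not part of the commit: it assumes hyper 0.14 with the `server`, `http1`, and `tcp` features plus tokio with `rt-multi-thread` and `macros`, and it mirrors the diff's accept loop, `pipeline_flush(true)`, and `service_fn` handler. Unlike the bench, which awaits each connection serially on a single-threaded runtime, this sketch spawns a task per connection.

```rust
// Minimal sketch (hyper 0.14): serve each accepted TCP connection directly
// with `server::conn::Http`, the lower-level replacement for the removed
// `Server` builder. Address, service body, and options mirror the bench.
use hyper::server::conn::Http;
use hyper::service::service_fn;
use hyper::{Body, Response};
use tokio::net::TcpListener;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let listener = TcpListener::bind("127.0.0.1:0").await?;
    println!("listening on {}", listener.local_addr()?);

    loop {
        let (stream, _peer) = listener.accept().await?;
        // The bench awaits this inline; spawning per connection is the more
        // common pattern and is used here only for illustration.
        tokio::spawn(async move {
            // `pipeline_flush(true)` replaces `Server`'s `http1_pipeline_flush(true)`.
            if let Err(e) = Http::new()
                .pipeline_flush(true)
                .serve_connection(
                    stream,
                    service_fn(|_req| async {
                        Ok::<_, hyper::Error>(Response::new(Body::from("Hello, World!")))
                    }),
                )
                .await
            {
                eprintln!("connection error: {}", e);
            }
        });
    }
}
```

With the `Server` API gone, the accept loop and per-connection driving that `Server` used to do now live in the bench itself; `serve_connection` only handles a single already-accepted stream.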

@@ -3,87 +3,85 @@
 extern crate test;
-// TODO: Reimplement hello_world_16 bench using hyper::server::conn
-// (instead of Server).
-// use std::io::{Read, Write};
-// use std::net::TcpStream;
-// use std::sync::mpsc;
-// use std::time::Duration;
-// use tokio::sync::oneshot;
-// use hyper::service::{make_service_fn, service_fn};
-// use hyper::{Body, Response, Server};
-// const PIPELINED_REQUESTS: usize = 16;
-// #[bench]
-// fn hello_world_16(b: &mut test::Bencher) {
-//     let _ = pretty_env_logger::try_init();
-//     let (_until_tx, until_rx) = oneshot::channel::<()>();
-//     let addr = {
-//         let (addr_tx, addr_rx) = mpsc::channel();
-//         std::thread::spawn(move || {
-//             let addr = "127.0.0.1:0".parse().unwrap();
-//             let make_svc = make_service_fn(|_| async {
-//                 Ok::<_, hyper::Error>(service_fn(|_| async {
-//                     Ok::<_, hyper::Error>(Response::new(Body::from("Hello, World!")))
-//                 }))
-//             });
-//             let rt = tokio::runtime::Builder::new_current_thread()
-//                 .enable_all()
-//                 .build()
-//                 .expect("rt build");
-//             let srv = rt.block_on(async move {
-//                 Server::bind(&addr)
-//                     .http1_pipeline_flush(true)
-//                     .serve(make_svc)
-//             });
-//             addr_tx.send(srv.local_addr()).unwrap();
-//             let graceful = srv.with_graceful_shutdown(async {
-//                 until_rx.await.ok();
-//             });
-//             rt.block_on(async {
-//                 if let Err(e) = graceful.await {
-//                     panic!("server error: {}", e);
-//                 }
-//             });
-//         });
-//         addr_rx.recv().unwrap()
-//     };
-//     let mut pipelined_reqs = Vec::new();
-//     for _ in 0..PIPELINED_REQUESTS {
-//         pipelined_reqs.extend_from_slice(b"GET / HTTP/1.1\r\nHost: localhost\r\n\r\n");
-//     }
-//     let total_bytes = {
-//         let mut tcp = TcpStream::connect(addr).unwrap();
-//         tcp.write_all(b"GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n")
-//             .unwrap();
-//         let mut buf = Vec::new();
-//         tcp.read_to_end(&mut buf).unwrap()
-//     } * PIPELINED_REQUESTS;
-//     let mut tcp = TcpStream::connect(addr).unwrap();
-//     tcp.set_read_timeout(Some(Duration::from_secs(3))).unwrap();
-//     let mut buf = [0u8; 8192];
-//     b.bytes = (pipelined_reqs.len() + total_bytes) as u64;
-//     b.iter(|| {
-//         tcp.write_all(&pipelined_reqs).unwrap();
-//         let mut sum = 0;
-//         while sum < total_bytes {
-//             sum += tcp.read(&mut buf).unwrap();
-//         }
-//         assert_eq!(sum, total_bytes);
-//     });
-// }
+use std::io::{Read, Write};
+use std::net::{SocketAddr, TcpStream};
+use std::sync::mpsc;
+use std::time::Duration;
+use tokio::net::TcpListener;
+use tokio::sync::oneshot;
+use hyper::server::conn::Http;
+use hyper::service::service_fn;
+use hyper::{Body, Response};
+const PIPELINED_REQUESTS: usize = 16;
+#[bench]
+fn hello_world_16(b: &mut test::Bencher) {
+    let _ = pretty_env_logger::try_init();
+    let (_until_tx, until_rx) = oneshot::channel::<()>();
+    let addr = {
+        let (addr_tx, addr_rx) = mpsc::channel();
+        std::thread::spawn(move || {
+            let addr: SocketAddr = "127.0.0.1:0".parse().unwrap();
+            let rt = tokio::runtime::Builder::new_current_thread()
+                .enable_all()
+                .build()
+                .expect("rt build");
+            let listener = rt.block_on(TcpListener::bind(addr)).unwrap();
+            let addr = listener.local_addr().unwrap();
+            rt.spawn(async move {
+                loop {
+                    let (stream, _addr) = listener.accept().await.expect("accept");
+                    Http::new()
+                        .pipeline_flush(true)
+                        .serve_connection(
+                            stream,
+                            service_fn(|_| async {
+                                Ok::<_, hyper::Error>(Response::new(Body::from("Hello, World!")))
+                            }),
+                        )
+                        .await
+                        .unwrap();
+                }
+            });
+            addr_tx.send(addr).unwrap();
+            rt.block_on(until_rx).ok();
+        });
+        addr_rx.recv().unwrap()
+    };
+    let mut pipelined_reqs = Vec::new();
+    for _ in 0..PIPELINED_REQUESTS {
+        pipelined_reqs.extend_from_slice(b"GET / HTTP/1.1\r\nHost: localhost\r\n\r\n");
+    }
+    let total_bytes = {
+        let mut tcp = TcpStream::connect(addr).unwrap();
+        tcp.write_all(b"GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n")
+            .unwrap();
+        let mut buf = Vec::new();
+        tcp.read_to_end(&mut buf).unwrap()
+    } * PIPELINED_REQUESTS;
+    let mut tcp = TcpStream::connect(addr).unwrap();
+    tcp.set_read_timeout(Some(Duration::from_secs(3))).unwrap();
+    let mut buf = [0u8; 8192];
+    b.bytes = (pipelined_reqs.len() + total_bytes) as u64;
+    b.iter(|| {
+        tcp.write_all(&pipelined_reqs).unwrap();
+        let mut sum = 0;
+        while sum < total_bytes {
+            sum += tcp.read(&mut buf).unwrap();
+        }
+        assert_eq!(sum, total_bytes);
+    });
+}
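
A note on how the measurement itself works: the bench first sends a single `Connection: close` request and reads to EOF to learn how many bytes one response occupies, multiplies that by `PIPELINED_REQUESTS` to know when a pipelined batch has been fully answered, and sets `b.bytes` to request-plus-response bytes so `cargo bench` can report throughput. The sketch below restates that client logic outside the `#[bench]` harness, purely for illustration; `pipelined_round_trip` is a made-up name, and the request strings and constants are taken from the diff. It assumes a server like the one in the earlier sketch listening at `addr`.

```rust
// Illustrative client side of the bench: write PIPELINED_REQUESTS
// back-to-back requests on one keep-alive connection, then read until the
// expected number of response bytes has arrived.
use std::io::{Read, Write};
use std::net::{SocketAddr, TcpStream};
use std::time::Duration;

const PIPELINED_REQUESTS: usize = 16;

fn pipelined_round_trip(addr: SocketAddr) -> std::io::Result<()> {
    // Calibrate: one `Connection: close` request tells us how many bytes a
    // single response occupies, so we know when a pipelined batch is done.
    let bytes_per_response = {
        let mut tcp = TcpStream::connect(addr)?;
        tcp.write_all(b"GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n")?;
        let mut buf = Vec::new();
        tcp.read_to_end(&mut buf)?
    };
    let total_bytes = bytes_per_response * PIPELINED_REQUESTS;

    // Build one buffer holding all pipelined requests.
    let mut reqs = Vec::new();
    for _ in 0..PIPELINED_REQUESTS {
        reqs.extend_from_slice(b"GET / HTTP/1.1\r\nHost: localhost\r\n\r\n");
    }

    let mut tcp = TcpStream::connect(addr)?;
    tcp.set_read_timeout(Some(Duration::from_secs(3)))?;
    tcp.write_all(&reqs)?;

    // Drain responses until the calibrated byte count has been read.
    let mut buf = [0u8; 8192];
    let mut read = 0;
    while read < total_bytes {
        read += tcp.read(&mut buf)?;
    }
    assert_eq!(read, total_bytes);
    Ok(())
}
```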