refactor(lib): update to 2018 edition

Author: Sean McArthur
Date: 2019-07-09 14:50:51 -07:00
parent 79ae89e066
commit da9b0319ef
37 changed files with 358 additions and 398 deletions
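
The change is mechanical: under the 2018 edition, paths to items inside the crate are written `crate::foo` instead of the 2015-edition `::foo` (or bare `foo`), so every crate-local import and type path below gains a `crate::` prefix, while paths into external crates such as `::h2`, `::futures`, and `::bytes` stay as they are. In each hunk, the old `::`-style line is shown immediately followed by its `crate::` replacement. A minimal standalone sketch of the difference, using a made-up `error` module rather than hyper's actual layout:

    // Standalone sketch (edition = "2018"), not hyper code: `error` is a
    // stand-in module used only to illustrate the path change.
    mod error {
        #[derive(Debug)]
        pub struct Error(pub &'static str);
    }

    // Rust 2015: `use ::error::Error;` (or `use error::Error;`) reached the
    // crate root, but looked the same as an external-crate import.
    // Rust 2018: crate-local paths start with `crate::`, making the origin
    // of every import explicit.
    use crate::error::Error;

    fn unexpected_message() -> Result<(), Error> {
        Err(Error("unexpected message"))
    }

    fn main() {
        println!("{:?}", unexpected_message());
    }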

@@ -8,9 +8,9 @@ use http::{HeaderMap, Method, Version};
use http::header::{HeaderValue, CONNECTION};
use tokio_io::{AsyncRead, AsyncWrite};
use ::Chunk;
use proto::{BodyLength, DecodedLength, MessageHead};
use headers::connection_keep_alive;
use crate::Chunk;
use crate::proto::{BodyLength, DecodedLength, MessageHead};
use crate::headers::connection_keep_alive;
use super::io::{Buffered};
use super::{EncodedBuf, Encode, Encoder, /*Decode,*/ Decoder, Http1Transaction, ParseContext};
@@ -84,7 +84,7 @@ where I: AsyncRead + AsyncWrite,
self.io.into_inner()
}
pub fn pending_upgrade(&mut self) -> Option<::upgrade::Pending> {
pub fn pending_upgrade(&mut self) -> Option<crate::upgrade::Pending> {
self.state.upgrade.take()
}
@@ -129,7 +129,7 @@ where I: AsyncRead + AsyncWrite,
read_buf.len() >= 24 && read_buf[..24] == *H2_PREFACE
}
pub fn read_head(&mut self) -> Poll<Option<(MessageHead<T::Incoming>, DecodedLength, bool)>, ::Error> {
pub fn read_head(&mut self) -> Poll<Option<(MessageHead<T::Incoming>, DecodedLength, bool)>, crate::Error> {
debug_assert!(self.can_read_head());
trace!("Conn::read_head");
@@ -168,7 +168,7 @@ where I: AsyncRead + AsyncWrite,
Ok(Async::Ready(Some((msg.head, msg.decode, msg.wants_upgrade))))
}
fn on_read_head_error<Z>(&mut self, e: ::Error) -> Poll<Option<Z>, ::Error> {
fn on_read_head_error<Z>(&mut self, e: crate::Error) -> Poll<Option<Z>, crate::Error> {
// If we are currently waiting on a message, then an empty
// message should be reported as an error. If not, it is just
// the connection closing gracefully.
@@ -233,7 +233,7 @@ where I: AsyncRead + AsyncWrite,
ret
}
pub fn read_keep_alive(&mut self) -> Poll<(), ::Error> {
pub fn read_keep_alive(&mut self) -> Poll<(), crate::Error> {
debug_assert!(!self.can_read_head() && !self.can_read_body());
if self.is_mid_message() {
@@ -254,22 +254,22 @@ where I: AsyncRead + AsyncWrite,
//
// This should only be called for Clients wanting to enter the idle
// state.
fn require_empty_read(&mut self) -> Poll<(), ::Error> {
fn require_empty_read(&mut self) -> Poll<(), crate::Error> {
debug_assert!(!self.can_read_head() && !self.can_read_body());
debug_assert!(!self.is_mid_message());
debug_assert!(T::is_client());
if !self.io.read_buf().is_empty() {
debug!("received an unexpected {} bytes", self.io.read_buf().len());
return Err(::Error::new_unexpected_message());
return Err(crate::Error::new_unexpected_message());
}
let num_read = try_ready!(self.force_io_read().map_err(::Error::new_io));
let num_read = try_ready!(self.force_io_read().map_err(crate::Error::new_io));
if num_read == 0 {
let ret = if self.should_error_on_eof() {
trace!("found unexpected EOF on busy connection: {:?}", self.state);
Err(::Error::new_incomplete())
Err(crate::Error::new_incomplete())
} else {
trace!("found EOF on idle connection, closing");
Ok(Async::Ready(()))
@@ -281,10 +281,10 @@ where I: AsyncRead + AsyncWrite,
}
debug!("received unexpected {} bytes on an idle connection", num_read);
Err(::Error::new_unexpected_message())
Err(crate::Error::new_unexpected_message())
}
fn mid_message_detect_eof(&mut self) -> Poll<(), ::Error> {
fn mid_message_detect_eof(&mut self) -> Poll<(), crate::Error> {
debug_assert!(!self.can_read_head() && !self.can_read_body());
debug_assert!(self.is_mid_message());
@@ -292,12 +292,12 @@ where I: AsyncRead + AsyncWrite,
return Ok(Async::NotReady);
}
let num_read = try_ready!(self.force_io_read().map_err(::Error::new_io));
let num_read = try_ready!(self.force_io_read().map_err(crate::Error::new_io));
if num_read == 0 {
trace!("found unexpected EOF on busy connection: {:?}", self.state);
self.state.close_read();
Err(::Error::new_incomplete())
Err(crate::Error::new_incomplete())
} else {
Ok(Async::Ready(()))
}
@@ -563,12 +563,12 @@ where I: AsyncRead + AsyncWrite,
//
// - Client: there is nothing we can do
// - Server: if Response hasn't been written yet, we can send a 4xx response
fn on_parse_error(&mut self, err: ::Error) -> ::Result<()> {
fn on_parse_error(&mut self, err: crate::Error) -> crate::Result<()> {
match self.state.writing {
Writing::Init => {
if self.has_h2_prefix() {
return Err(::Error::new_version_h2())
return Err(crate::Error::new_version_h2())
}
if let Some(msg) = T::on_error(&err) {
// Drop the cached headers so as to not trigger a debug
@@ -623,7 +623,7 @@ where I: AsyncRead + AsyncWrite,
}
}
pub fn take_error(&mut self) -> ::Result<()> {
pub fn take_error(&mut self) -> crate::Result<()> {
if let Some(err) = self.state.error.take() {
Err(err)
} else {
@@ -631,7 +631,7 @@ where I: AsyncRead + AsyncWrite,
}
}
pub(super) fn on_upgrade(&mut self) -> ::upgrade::OnUpgrade {
pub(super) fn on_upgrade(&mut self) -> crate::upgrade::OnUpgrade {
trace!("{}: prepare possible HTTP upgrade", T::LOG);
self.state.prepare_upgrade()
}
@@ -658,7 +658,7 @@ struct State {
cached_headers: Option<HeaderMap>,
/// If an error occurs when there wasn't a direct way to return it
/// back to the user, this is set.
error: Option<::Error>,
error: Option<crate::Error>,
/// Current keep-alive status.
keep_alive: KA,
/// If mid-message, the HTTP Method that started it.
@@ -675,7 +675,7 @@ struct State {
/// State of allowed writes
writing: Writing,
/// An expected pending HTTP upgrade.
upgrade: Option<::upgrade::Pending>,
upgrade: Option<crate::upgrade::Pending>,
/// Either HTTP/1.0 or 1.1 connection
version: Version,
}
@@ -868,9 +868,9 @@ impl State {
}
}
fn prepare_upgrade(&mut self) -> ::upgrade::OnUpgrade {
fn prepare_upgrade(&mut self) -> crate::upgrade::OnUpgrade {
debug_assert!(self.upgrade.is_none());
let (tx, rx) = ::upgrade::pending();
let (tx, rx) = crate::upgrade::pending();
self.upgrade = Some(tx);
rx
}
@@ -888,9 +888,9 @@ mod tests {
let len = s.len();
b.bytes = len as u64;
let mut io = ::mock::AsyncIo::new_buf(Vec::new(), 0);
let mut io = crate::mock::AsyncIo::new_buf(Vec::new(), 0);
io.panic();
let mut conn = Conn::<_, ::Chunk, ::proto::h1::ServerTransaction>::new(io);
let mut conn = Conn::<_, crate::Chunk, crate::proto::h1::ServerTransaction>::new(io);
*conn.io.read_buf_mut() = ::bytes::BytesMut::from(&s[..]);
conn.state.cached_headers = Some(HeaderMap::with_capacity(2));

@@ -323,7 +323,7 @@ mod tests {
use super::super::io::MemRead;
use futures::{Async, Poll};
use bytes::{BytesMut, Bytes};
use mock::AsyncIo;
use crate::mock::AsyncIo;
impl<'a> MemRead for &'a [u8] {
fn read_mem(&mut self, len: usize) -> Poll<Bytes, io::Error> {

@@ -5,17 +5,17 @@ use futures::{Async, Future, Poll, Stream};
use http::{Request, Response, StatusCode};
use tokio_io::{AsyncRead, AsyncWrite};
use body::{Body, Payload};
use body::internal::FullDataArg;
use common::{Never, YieldNow};
use proto::{BodyLength, DecodedLength, Conn, Dispatched, MessageHead, RequestHead, RequestLine, ResponseHead};
use crate::body::{Body, Payload};
use crate::body::internal::FullDataArg;
use crate::common::{Never, YieldNow};
use crate::proto::{BodyLength, DecodedLength, Conn, Dispatched, MessageHead, RequestHead, RequestLine, ResponseHead};
use super::Http1Transaction;
use service::Service;
use crate::service::Service;
pub(crate) struct Dispatcher<D, Bs: Payload, I, T> {
conn: Conn<I, Bs::Data, T>,
dispatch: D,
body_tx: Option<::body::Sender>,
body_tx: Option<crate::body::Sender>,
body_rx: Option<Bs>,
is_closing: bool,
/// If the poll loop reaches its max spin count, it will yield by notifying
@@ -30,7 +30,7 @@ pub(crate) trait Dispatch {
type PollError;
type RecvItem;
fn poll_msg(&mut self) -> Poll<Option<(Self::PollItem, Self::PollBody)>, Self::PollError>;
fn recv_msg(&mut self, msg: ::Result<(Self::RecvItem, Body)>) -> ::Result<()>;
fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Body)>) -> crate::Result<()>;
fn poll_ready(&mut self) -> Poll<(), ()>;
fn should_poll(&self) -> bool;
}
@@ -41,11 +41,11 @@ pub struct Server<S: Service> {
}
pub struct Client<B> {
callback: Option<::client::dispatch::Callback<Request<B>, Response<Body>>>,
callback: Option<crate::client::dispatch::Callback<Request<B>, Response<Body>>>,
rx: ClientRx<B>,
}
type ClientRx<B> = ::client::dispatch::Receiver<Request<B>, Response<Body>>;
type ClientRx<B> = crate::client::dispatch::Receiver<Request<B>, Response<Body>>;
impl<D, Bs, I, T> Dispatcher<D, Bs, I, T>
where
@@ -80,7 +80,7 @@ where
///
/// This is useful for old-style HTTP upgrades, but ignores
/// newer-style upgrade API.
pub fn poll_without_shutdown(&mut self) -> Poll<(), ::Error> {
pub fn poll_without_shutdown(&mut self) -> Poll<(), crate::Error> {
self.poll_catch(false)
.map(|x| {
x.map(|ds| if let Dispatched::Upgrade(pending) = ds {
@@ -89,7 +89,7 @@ where
})
}
fn poll_catch(&mut self, should_shutdown: bool) -> Poll<Dispatched, ::Error> {
fn poll_catch(&mut self, should_shutdown: bool) -> Poll<Dispatched, crate::Error> {
self.poll_inner(should_shutdown).or_else(|e| {
// An error means we're shutting down either way.
// We just try to give the error to the user,
@@ -100,7 +100,7 @@ where
})
}
fn poll_inner(&mut self, should_shutdown: bool) -> Poll<Dispatched, ::Error> {
fn poll_inner(&mut self, should_shutdown: bool) -> Poll<Dispatched, crate::Error> {
T::update_date();
try_ready!(self.poll_loop());
@@ -110,7 +110,7 @@ where
self.conn.take_error()?;
return Ok(Async::Ready(Dispatched::Upgrade(pending)));
} else if should_shutdown {
try_ready!(self.conn.shutdown().map_err(::Error::new_shutdown));
try_ready!(self.conn.shutdown().map_err(crate::Error::new_shutdown));
}
self.conn.take_error()?;
Ok(Async::Ready(Dispatched::Shutdown))
@@ -119,7 +119,7 @@ where
}
}
fn poll_loop(&mut self) -> Poll<(), ::Error> {
fn poll_loop(&mut self) -> Poll<(), crate::Error> {
// Limit the looping on this connection, in case it is ready far too
// often, so that other futures don't starve.
//
@@ -155,7 +155,7 @@ where
}
}
fn poll_read(&mut self) -> Poll<(), ::Error> {
fn poll_read(&mut self) -> Poll<(), crate::Error> {
loop {
if self.is_closing {
return Ok(Async::Ready(()));
@@ -199,7 +199,7 @@ where
return Ok(Async::NotReady);
}
Err(e) => {
body.send_error(::Error::new_body(e));
body.send_error(crate::Error::new_body(e));
}
}
} else {
@@ -211,7 +211,7 @@ where
}
}
fn poll_read_head(&mut self) -> Poll<(), ::Error> {
fn poll_read_head(&mut self) -> Poll<(), crate::Error> {
// can dispatch receive, or does it still care about, an incoming message?
match self.dispatch.poll_ready() {
Ok(Async::Ready(())) => (),
@@ -255,12 +255,12 @@ where
}
}
fn poll_write(&mut self) -> Poll<(), ::Error> {
fn poll_write(&mut self) -> Poll<(), crate::Error> {
loop {
if self.is_closing {
return Ok(Async::Ready(()));
} else if self.body_rx.is_none() && self.conn.can_write_head() && self.dispatch.should_poll() {
if let Some((head, mut body)) = try_ready!(self.dispatch.poll_msg().map_err(::Error::new_user_service)) {
if let Some((head, mut body)) = try_ready!(self.dispatch.poll_msg().map_err(crate::Error::new_user_service)) {
// Check if the body knows its full data immediately.
//
// If so, we can skip a bit of bookkeeping that streaming
@@ -294,7 +294,7 @@ where
);
continue;
}
match body.poll_data().map_err(::Error::new_user_body)? {
match body.poll_data().map_err(crate::Error::new_user_body)? {
Async::Ready(Some(chunk)) => {
let eos = body.is_end_stream();
if eos {
@@ -327,10 +327,10 @@ where
}
}
fn poll_flush(&mut self) -> Poll<(), ::Error> {
fn poll_flush(&mut self) -> Poll<(), crate::Error> {
self.conn.flush().map_err(|err| {
debug!("error writing: {}", err);
::Error::new_body_write(err)
crate::Error::new_body_write(err)
})
}
@@ -367,7 +367,7 @@ where
Bs: Payload,
{
type Item = Dispatched;
type Error = ::Error;
type Error = crate::Error;
#[inline]
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
@@ -421,7 +421,7 @@ where
}
}
fn recv_msg(&mut self, msg: ::Result<(Self::RecvItem, Body)>) -> ::Result<()> {
fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Body)>) -> crate::Result<()> {
let (msg, body) = msg?;
let mut req = Request::new(body);
*req.method_mut() = msg.subject.0;
@@ -501,7 +501,7 @@ where
}
}
fn recv_msg(&mut self, msg: ::Result<(Self::RecvItem, Body)>) -> ::Result<()> {
fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Body)>) -> crate::Result<()> {
match msg {
Ok((msg, body)) => {
if let Some(cb) = self.callback.take() {
@@ -515,7 +515,7 @@ where
// Getting here is likely a bug! An error should have happened
// in Conn::require_empty_read() before ever parsing a
// full message!
Err(::Error::new_unexpected_message())
Err(crate::Error::new_unexpected_message())
}
},
Err(err) => {
@@ -526,7 +526,7 @@ where
trace!("canceling queued request with connection error: {}", err);
// in this case, the message was never even started, so it's safe to tell
// the user that the request was completely canceled
let _ = cb.send(Err((::Error::new_canceled().with(err), Some(req))));
let _ = cb.send(Err((crate::Error::new_canceled().with(err), Some(req))));
Ok(())
} else {
Err(err)
@@ -559,8 +559,8 @@ mod tests {
extern crate pretty_env_logger;
use super::*;
use mock::AsyncIo;
use proto::h1::ClientTransaction;
use crate::mock::AsyncIo;
use crate::proto::h1::ClientTransaction;
#[test]
fn client_read_bytes_before_writing_request() {
@@ -569,8 +569,8 @@ mod tests {
// Block at 0 for now, but we will release this response before
// the request is ready to write later...
let io = AsyncIo::new_buf(b"HTTP/1.1 200 OK\r\n\r\n".to_vec(), 0);
let (mut tx, rx) = ::client::dispatch::channel();
let conn = Conn::<_, ::Chunk, ClientTransaction>::new(io);
let (mut tx, rx) = crate::client::dispatch::channel();
let conn = Conn::<_, crate::Chunk, ClientTransaction>::new(io);
let mut dispatcher = Dispatcher::new(Client::new(rx), conn);
// First poll is needed to allow tx to send...
@@ -578,7 +578,7 @@ mod tests {
// Unblock our IO, which has a response before we've sent request!
dispatcher.conn.io_mut().block_in(100);
let res_rx = tx.try_send(::Request::new(::Body::empty())).unwrap();
let res_rx = tx.try_send(crate::Request::new(crate::Body::empty())).unwrap();
let a1 = dispatcher.poll().expect("error should be sent on channel");
assert!(a1.is_ready(), "dispatcher should be closed");
@@ -587,7 +587,7 @@ mod tests {
.expect_err("callback response");
match (err.0.kind(), err.1) {
(&::error::Kind::Canceled, Some(_)) => (),
(&crate::error::Kind::Canceled, Some(_)) => (),
other => panic!("expected Canceled, got {:?}", other),
}
Ok::<(), ()>(())
@@ -599,16 +599,16 @@ mod tests {
let _ = pretty_env_logger::try_init();
::futures::lazy(|| {
let io = AsyncIo::new_buf(vec![], 0);
let (mut tx, rx) = ::client::dispatch::channel();
let conn = Conn::<_, ::Chunk, ClientTransaction>::new(io);
let (mut tx, rx) = crate::client::dispatch::channel();
let conn = Conn::<_, crate::Chunk, ClientTransaction>::new(io);
let mut dispatcher = Dispatcher::new(Client::new(rx), conn);
// First poll is needed to allow tx to send...
assert!(dispatcher.poll().expect("nothing is ready").is_not_ready());
let body = ::Body::wrap_stream(::futures::stream::once(Ok::<_, ::Error>("")));
let body = crate::Body::wrap_stream(::futures::stream::once(Ok::<_, crate::Error>("")));
let _res_rx = tx.try_send(::Request::new(body)).unwrap();
let _res_rx = tx.try_send(crate::Request::new(body)).unwrap();
dispatcher.poll().expect("empty body shouldn't panic");
Ok::<(), ()>(())

@@ -4,7 +4,7 @@ use bytes::{Buf, IntoBuf};
use bytes::buf::{Chain, Take};
use iovec::IoVec;
use common::StaticBuf;
use crate::common::StaticBuf;
use super::io::WriteBuf;
/// Encoders to handle different Transfer-Encodings.

@@ -136,7 +136,7 @@ where
}
pub(super) fn parse<S>(&mut self, ctx: ParseContext)
-> Poll<ParsedMessage<S::Incoming>, ::Error>
-> Poll<ParsedMessage<S::Incoming>, crate::Error>
where
S: Http1Transaction,
{
@@ -153,14 +153,14 @@ where
let max = self.read_buf_strategy.max();
if self.read_buf.len() >= max {
debug!("max_buf_size ({}) reached, closing", max);
return Err(::Error::new_too_large());
return Err(crate::Error::new_too_large());
}
},
}
match try_ready!(self.read_from_io().map_err(::Error::new_io)) {
match try_ready!(self.read_from_io().map_err(crate::Error::new_io)) {
0 => {
trace!("parse eof");
return Err(::Error::new_incomplete());
return Err(crate::Error::new_incomplete());
}
_ => {},
}
@@ -651,13 +651,13 @@ impl<T: Buf> Buf for BufDeque<T> {
mod tests {
use super::*;
use std::io::Read;
use mock::AsyncIo;
use crate::mock::AsyncIo;
#[cfg(feature = "nightly")]
use test::Bencher;
#[cfg(test)]
impl<T: Read> MemRead for ::mock::AsyncIo<T> {
impl<T: Read> MemRead for crate::mock::AsyncIo<T> {
fn read_mem(&mut self, len: usize) -> Poll<Bytes, io::Error> {
let mut v = vec![0; len];
let n = try_nb!(self.read(v.as_mut_slice()));
@@ -689,7 +689,7 @@ mod tests {
cached_headers: &mut None,
req_method: &mut None,
};
assert!(buffered.parse::<::proto::h1::ClientTransaction>(ctx).unwrap().is_not_ready());
assert!(buffered.parse::<crate::proto::h1::ClientTransaction>(ctx).unwrap().is_not_ready());
assert!(buffered.io.blocked());
}
@@ -890,10 +890,10 @@ mod tests {
let s = "Hello, World!";
b.bytes = s.len() as u64;
let mut write_buf = WriteBuf::<::Chunk>::new();
let mut write_buf = WriteBuf::<crate::Chunk>::new();
write_buf.set_strategy(WriteStrategy::Flatten);
b.iter(|| {
let chunk = ::Chunk::from(s);
let chunk = crate::Chunk::from(s);
write_buf.buffer(chunk);
::test::black_box(&write_buf);
write_buf.headers.bytes.clear();

@@ -1,7 +1,7 @@
use bytes::BytesMut;
use http::{HeaderMap, Method};
use proto::{MessageHead, BodyLength, DecodedLength};
use crate::proto::{MessageHead, BodyLength, DecodedLength};
pub(crate) use self::conn::Conn;
pub(crate) use self::dispatch::Dispatcher;
@@ -27,9 +27,9 @@ pub(crate) trait Http1Transaction {
type Outgoing: Default;
const LOG: &'static str;
fn parse(bytes: &mut BytesMut, ctx: ParseContext) -> ParseResult<Self::Incoming>;
fn encode(enc: Encode<Self::Outgoing>, dst: &mut Vec<u8>) -> ::Result<Encoder>;
fn encode(enc: Encode<Self::Outgoing>, dst: &mut Vec<u8>) -> crate::Result<Encoder>;
fn on_error(err: &::Error) -> Option<MessageHead<Self::Outgoing>>;
fn on_error(err: &crate::Error) -> Option<MessageHead<Self::Outgoing>>;
fn is_client() -> bool {
!Self::is_server()
@@ -51,7 +51,7 @@ pub(crate) trait Http1Transaction {
}
/// Result newtype for Http1Transaction::parse.
pub(crate) type ParseResult<T> = Result<Option<ParsedMessage<T>>, ::error::Parse>;
pub(crate) type ParseResult<T> = Result<Option<ParsedMessage<T>>, crate::error::Parse>;
#[derive(Debug)]
pub(crate) struct ParsedMessage<T> {

@@ -10,10 +10,10 @@ use http::header::{self, Entry, HeaderName, HeaderValue};
use http::{HeaderMap, Method, StatusCode, Version};
use httparse;
use error::Parse;
use headers;
use proto::{BodyLength, DecodedLength, MessageHead, RequestLine, RequestHead};
use proto::h1::{Encode, Encoder, Http1Transaction, ParseResult, ParseContext, ParsedMessage, date};
use crate::error::Parse;
use crate::headers;
use crate::proto::{BodyLength, DecodedLength, MessageHead, RequestLine, RequestHead};
use crate::proto::h1::{Encode, Encoder, Http1Transaction, ParseResult, ParseContext, ParsedMessage, date};
const MAX_HEADERS: usize = 100;
const AVERAGE_HEADER_SIZE: usize = 30; // totally scientific
@@ -239,7 +239,7 @@ impl Http1Transaction for Server {
}))
}
fn encode(mut msg: Encode<Self::Outgoing>, mut dst: &mut Vec<u8>) -> ::Result<Encoder> {
fn encode(mut msg: Encode<Self::Outgoing>, mut dst: &mut Vec<u8>) -> crate::Result<Encoder> {
trace!(
"Server::encode status={:?}, body={:?}, req_method={:?}",
msg.head.subject,
@@ -266,7 +266,7 @@ impl Http1Transaction for Server {
*msg.head = MessageHead::default();
msg.head.subject = StatusCode::INTERNAL_SERVER_ERROR;
msg.body = None;
(Err(::Error::new_user_unsupported_status_code()), true)
(Err(crate::Error::new_user_unsupported_status_code()), true)
} else {
(Ok(()), !msg.keep_alive)
};
@@ -309,7 +309,7 @@ impl Http1Transaction for Server {
if wrote_len {
warn!("unexpected content-length found, canceling");
rewind(dst);
return Err(::Error::new_user_header());
return Err(crate::Error::new_user_header());
}
match msg.body {
Some(BodyLength::Known(known_len)) => {
@@ -369,7 +369,7 @@ impl Http1Transaction for Server {
if fold.0 != len {
warn!("multiple Content-Length values found: [{}, {}]", fold.0, len);
rewind(dst);
return Err(::Error::new_user_header());
return Err(crate::Error::new_user_header());
}
folded = Some(fold);
} else {
@@ -378,7 +378,7 @@ impl Http1Transaction for Server {
} else {
warn!("illegal Content-Length value: {:?}", value);
rewind(dst);
return Err(::Error::new_user_header());
return Err(crate::Error::new_user_header());
}
}
if let Some((len, value)) = folded {
@@ -418,7 +418,7 @@ impl Http1Transaction for Server {
if wrote_len {
warn!("unexpected transfer-encoding found, canceling");
rewind(dst);
return Err(::Error::new_user_header());
return Err(crate::Error::new_user_header());
}
// check that we actually can send a chunked body...
if msg.head.version == Version::HTTP_10 || !Server::can_chunked(msg.req_method, msg.head.subject) {
@@ -531,8 +531,8 @@ impl Http1Transaction for Server {
ret.map(|()| encoder.set_last(is_last))
}
fn on_error(err: &::Error) -> Option<MessageHead<Self::Outgoing>> {
use ::error::Kind;
fn on_error(err: &crate::Error) -> Option<MessageHead<Self::Outgoing>> {
use crate::error::Kind;
let status = match *err.kind() {
Kind::Parse(Parse::Method) |
Kind::Parse(Parse::Header) |
@@ -666,7 +666,7 @@ impl Http1Transaction for Client {
}
}
fn encode(msg: Encode<Self::Outgoing>, dst: &mut Vec<u8>) -> ::Result<Encoder> {
fn encode(msg: Encode<Self::Outgoing>, dst: &mut Vec<u8>) -> crate::Result<Encoder> {
trace!("Client::encode method={:?}, body={:?}", msg.head.subject.0, msg.body);
*msg.req_method = Some(msg.head.subject.0.clone());
@@ -704,7 +704,7 @@ impl Http1Transaction for Client {
Ok(body)
}
fn on_error(_err: &::Error) -> Option<MessageHead<Self::Outgoing>> {
fn on_error(_err: &crate::Error) -> Option<MessageHead<Self::Outgoing>> {
// we can't tell the server about any errors it creates
None
}
@@ -937,7 +937,7 @@ fn record_header_indices(
bytes: &[u8],
headers: &[httparse::Header],
indices: &mut [HeaderIndices]
) -> Result<(), ::error::Parse> {
) -> Result<(), crate::error::Parse> {
let bytes_ptr = bytes.as_ptr() as usize;
// FIXME: This should be a single plain `for` loop.
@@ -966,7 +966,7 @@ fn record_header_indices(
{
if header.name.len() >= (1 << 16) {
debug!("header name larger than 64kb: {:?}", header.name);
return Err(::error::Parse::TooLarge);
return Err(crate::error::Parse::TooLarge);
}
let name_start = header.name.as_ptr() as usize - bytes_ptr;
let name_end = name_start + header.name.len();
@@ -1071,12 +1071,12 @@ mod tests {
req_method: &mut method,
}).unwrap().unwrap();
assert_eq!(raw.len(), 0);
assert_eq!(msg.head.subject.0, ::Method::GET);
assert_eq!(msg.head.subject.0, crate::Method::GET);
assert_eq!(msg.head.subject.1, "/echo");
assert_eq!(msg.head.version, ::Version::HTTP_11);
assert_eq!(msg.head.version, crate::Version::HTTP_11);
assert_eq!(msg.head.headers.len(), 1);
assert_eq!(msg.head.headers["Host"], "hyper.rs");
assert_eq!(method, Some(::Method::GET));
assert_eq!(method, Some(crate::Method::GET));
}
@@ -1087,12 +1087,12 @@ mod tests {
let mut raw = BytesMut::from(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n".to_vec());
let ctx = ParseContext {
cached_headers: &mut None,
req_method: &mut Some(::Method::GET),
req_method: &mut Some(crate::Method::GET),
};
let msg = Client::parse(&mut raw, ctx).unwrap().unwrap();
assert_eq!(raw.len(), 0);
assert_eq!(msg.head.subject, ::StatusCode::OK);
assert_eq!(msg.head.version, ::Version::HTTP_11);
assert_eq!(msg.head.subject, crate::StatusCode::OK);
assert_eq!(msg.head.version, crate::Version::HTTP_11);
assert_eq!(msg.head.headers.len(), 1);
assert_eq!(msg.head.headers["Content-Length"], "0");
}
@@ -1120,7 +1120,7 @@ mod tests {
.expect("parse complete")
}
fn parse_err(s: &str, comment: &str) -> ::error::Parse {
fn parse_err(s: &str, comment: &str) -> crate::error::Parse {
let mut bytes = BytesMut::from(s);
Server::parse(&mut bytes, ParseContext {
cached_headers: &mut None,
@@ -1266,7 +1266,7 @@ mod tests {
.expect("parse complete")
}
fn parse_err(s: &str) -> ::error::Parse {
fn parse_err(s: &str) -> crate::error::Parse {
let mut bytes = BytesMut::from(s);
Client::parse(&mut bytes, ParseContext {
cached_headers: &mut None,
@@ -1423,7 +1423,7 @@ mod tests {
#[test]
fn test_client_request_encode_title_case() {
use http::header::HeaderValue;
use proto::BodyLength;
use crate::proto::BodyLength;
let mut head = MessageHead::default();
head.headers.insert("content-length", HeaderValue::from_static("10"));
@@ -1553,7 +1553,7 @@ mod tests {
#[bench]
fn bench_server_encode_headers_preset(b: &mut Bencher) {
use http::header::HeaderValue;
use proto::BodyLength;
use crate::proto::BodyLength;
let len = 108;
b.bytes = len as u64;
@@ -1581,7 +1581,7 @@ mod tests {
#[cfg(feature = "nightly")]
#[bench]
fn bench_server_encode_no_headers(b: &mut Bencher) {
use proto::BodyLength;
use crate::proto::BodyLength;
let len = 76;
b.bytes = len as u64;

@@ -5,15 +5,15 @@ use futures::sync::{mpsc, oneshot};
use h2::client::{Builder, Handshake, SendRequest};
use tokio_io::{AsyncRead, AsyncWrite};
use headers::content_length_parse_all;
use body::Payload;
use ::common::{Exec, Never};
use headers;
use ::proto::Dispatched;
use crate::headers::content_length_parse_all;
use crate::body::Payload;
use crate::common::{Exec, Never};
use crate::headers;
use crate::proto::Dispatched;
use super::{PipeToSendStream, SendBuf};
use ::{Body, Request, Response};
use crate::{Body, Request, Response};
type ClientRx<B> = ::client::dispatch::Receiver<Request<B>, Response<Body>>;
type ClientRx<B> = crate::client::dispatch::Receiver<Request<B>, Response<Body>>;
/// An mpsc channel is used to help notify the `Connection` task when *all*
/// other handles to it have been dropped, so that it can shutdown.
type ConnDropRef = mpsc::Sender<Never>;
@@ -58,13 +58,13 @@ where
B: Payload + 'static,
{
type Item = Dispatched;
type Error = ::Error;
type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
let next = match self.state {
State::Handshaking(ref mut h) => {
let (request_tx, conn) = try_ready!(h.poll().map_err(::Error::new_h2));
let (request_tx, conn) = try_ready!(h.poll().map_err(crate::Error::new_h2));
// An mpsc channel is used entirely to detect when the
// 'Client' has been dropped. This is to get around a bug
// in h2 where dropping all SendRequests won't notify a
@@ -111,7 +111,7 @@ where
trace!("connection gracefully shutdown");
Ok(Async::Ready(Dispatched::Shutdown))
} else {
Err(::Error::new_h2(err))
Err(crate::Error::new_h2(err))
};
}
}
@@ -133,7 +133,7 @@ where
Ok(ok) => ok,
Err(err) => {
debug!("client send request error: {}", err);
cb.send(Err((::Error::new_h2(err), None)));
cb.send(Err((crate::Error::new_h2(err), None)));
continue;
}
};
@@ -162,12 +162,12 @@ where
Ok(res) => {
let content_length = content_length_parse_all(res.headers());
let res = res.map(|stream|
::Body::h2(stream, content_length));
crate::Body::h2(stream, content_length));
Ok(res)
},
Err(err) => {
debug!("client response error: {}", err);
Err((::Error::new_h2(err), None))
Err((crate::Error::new_h2(err), None))
}
}
});

@@ -7,7 +7,7 @@ use http::header::{
};
use http::HeaderMap;
use body::Payload;
use crate::body::Payload;
mod client;
pub(crate) mod server;
@@ -91,18 +91,18 @@ where
}
}
fn on_user_err(&mut self, err: S::Error) -> ::Error {
let err = ::Error::new_user_body(err);
fn on_user_err(&mut self, err: S::Error) -> crate::Error {
let err = crate::Error::new_user_body(err);
debug!("send body user stream error: {}", err);
self.body_tx.send_reset(err.h2_reason());
err
}
fn send_eos_frame(&mut self) -> ::Result<()> {
fn send_eos_frame(&mut self) -> crate::Result<()> {
trace!("send body eos");
self.body_tx
.send_data(SendBuf(None), true)
.map_err(::Error::new_body_write)
.map_err(crate::Error::new_body_write)
}
}
@@ -111,7 +111,7 @@ where
S: Payload,
{
type Item = ();
type Error = ::Error;
type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
@@ -123,18 +123,18 @@ where
if self.body_tx.capacity() == 0 {
loop {
match try_ready!(self.body_tx.poll_capacity().map_err(::Error::new_body_write)) {
match try_ready!(self.body_tx.poll_capacity().map_err(crate::Error::new_body_write)) {
Some(0) => {}
Some(_) => break,
None => return Err(::Error::new_canceled()),
None => return Err(crate::Error::new_canceled()),
}
}
} else {
if let Async::Ready(reason) =
self.body_tx.poll_reset().map_err(::Error::new_body_write)?
self.body_tx.poll_reset().map_err(crate::Error::new_body_write)?
{
debug!("stream received RST_STREAM: {:?}", reason);
return Err(::Error::new_body_write(::h2::Error::from(reason)));
return Err(crate::Error::new_body_write(::h2::Error::from(reason)));
}
}
@@ -150,7 +150,7 @@ where
let buf = SendBuf(Some(chunk));
self.body_tx
.send_data(buf, is_eos)
.map_err(::Error::new_body_write)?;
.map_err(crate::Error::new_body_write)?;
if is_eos {
return Ok(Async::Ready(()));
@@ -169,17 +169,17 @@ where
}
} else {
if let Async::Ready(reason) =
self.body_tx.poll_reset().map_err(|e| ::Error::new_body_write(e))?
self.body_tx.poll_reset().map_err(|e| crate::Error::new_body_write(e))?
{
debug!("stream received RST_STREAM: {:?}", reason);
return Err(::Error::new_body_write(::h2::Error::from(reason)));
return Err(crate::Error::new_body_write(::h2::Error::from(reason)));
}
match try_ready!(self.stream.poll_trailers().map_err(|e| self.on_user_err(e))) {
Some(trailers) => {
self.body_tx
.send_trailers(trailers)
.map_err(::Error::new_body_write)?;
.map_err(crate::Error::new_body_write)?;
return Ok(Async::Ready(()));
}
None => {

@@ -5,16 +5,16 @@ use h2::Reason;
use h2::server::{Builder, Connection, Handshake, SendResponse};
use tokio_io::{AsyncRead, AsyncWrite};
use ::headers::content_length_parse_all;
use ::body::Payload;
use body::internal::FullDataArg;
use ::common::exec::H2Exec;
use ::headers;
use ::service::Service;
use ::proto::Dispatched;
use crate::headers::content_length_parse_all;
use crate::body::Payload;
use crate::body::internal::FullDataArg;
use crate::common::exec::H2Exec;
use crate::headers;
use crate::service::Service;
use crate::proto::Dispatched;
use super::{PipeToSendStream, SendBuf};
use ::{Body, Response};
use crate::{Body, Response};
pub(crate) struct Server<T, S, B, E>
where
@@ -40,7 +40,7 @@ where
B: Payload,
{
conn: Connection<T, SendBuf<B::Data>>,
closing: Option<::Error>,
closing: Option<crate::Error>,
}
@@ -90,13 +90,13 @@ where
E: H2Exec<S::Future, B>,
{
type Item = Dispatched;
type Error = ::Error;
type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
let next = match self.state {
State::Handshaking(ref mut h) => {
let conn = try_ready!(h.poll().map_err(::Error::new_h2));
let conn = try_ready!(h.poll().map_err(crate::Error::new_h2));
State::Serving(Serving {
conn,
closing: None,
@@ -122,7 +122,7 @@ where
T: AsyncRead + AsyncWrite,
B: Payload,
{
fn poll_server<S, E>(&mut self, service: &mut S, exec: &E) -> Poll<(), ::Error>
fn poll_server<S, E>(&mut self, service: &mut S, exec: &E) -> Poll<(), crate::Error>
where
S: Service<
ReqBody=Body,
@@ -138,12 +138,12 @@ where
Ok(Async::Ready(())) => (),
Ok(Async::NotReady) => {
// use `poll_close` instead of `poll`, in order to avoid accepting a request.
try_ready!(self.conn.poll_close().map_err(::Error::new_h2));
try_ready!(self.conn.poll_close().map_err(crate::Error::new_h2));
trace!("incoming connection complete");
return Ok(Async::Ready(()));
}
Err(err) => {
let err = ::Error::new_user_service(err);
let err = crate::Error::new_user_service(err);
debug!("service closed: {}", err);
let reason = err.h2_reason();
@@ -161,11 +161,11 @@ where
}
// When the service is ready, accepts an incoming request.
if let Some((req, respond)) = try_ready!(self.conn.poll().map_err(::Error::new_h2)) {
if let Some((req, respond)) = try_ready!(self.conn.poll().map_err(crate::Error::new_h2)) {
trace!("incoming request");
let content_length = content_length_parse_all(req.headers());
let req = req.map(|stream| {
::Body::h2(stream, content_length)
crate::Body::h2(stream, content_length)
});
let fut = H2Stream::new(service.call(req), respond);
exec.execute_h2stream(fut)?;
@@ -179,7 +179,7 @@ where
debug_assert!(self.closing.is_some(), "poll_server broke loop without closing");
try_ready!(self.conn.poll_close().map_err(::Error::new_h2));
try_ready!(self.conn.poll_close().map_err(crate::Error::new_h2));
Err(self.closing.take().expect("polled after error"))
}
@@ -215,7 +215,7 @@ where
}
}
fn poll2(&mut self) -> Poll<(), ::Error> {
fn poll2(&mut self) -> Poll<(), crate::Error> {
loop {
let next = match self.state {
H2StreamState::Service(ref mut h) => {
@@ -225,15 +225,15 @@ where
// Body is not yet ready, so we want to check if the client has sent a
// RST_STREAM frame which would cancel the current request.
if let Async::Ready(reason) =
self.reply.poll_reset().map_err(|e| ::Error::new_h2(e))?
self.reply.poll_reset().map_err(|e| crate::Error::new_h2(e))?
{
debug!("stream received RST_STREAM: {:?}", reason);
return Err(::Error::new_h2(reason.into()));
return Err(crate::Error::new_h2(reason.into()));
}
return Ok(Async::NotReady);
}
Err(e) => {
let err = ::Error::new_user_service(e);
let err = crate::Error::new_user_service(e);
warn!("http2 service errored: {}", err);
self.reply.send_reset(err.h2_reason());
return Err(err);
@@ -249,7 +249,7 @@ where
.headers_mut()
.entry(::http::header::DATE)
.expect("DATE is a valid HeaderName")
.or_insert_with(::proto::h1::date::update_and_header_value);
.or_insert_with(crate::proto::h1::date::update_and_header_value);
macro_rules! reply {
($eos:expr) => ({
@@ -258,7 +258,7 @@ where
Err(e) => {
debug!("send response error: {}", e);
self.reply.send_reset(Reason::INTERNAL_ERROR);
return Err(::Error::new_h2(e));
return Err(crate::Error::new_h2(e));
}
}
})
@@ -274,7 +274,7 @@ where
let buf = SendBuf(Some(full));
body_tx
.send_data(buf, true)
.map_err(::Error::new_body_write)?;
.map_err(crate::Error::new_body_write)?;
return Ok(Async::Ready(()));
}

@@ -40,7 +40,7 @@ pub(crate) enum Dispatched {
/// Dispatcher completely shutdown connection.
Shutdown,
/// Dispatcher has pending upgrade, and so did not shutdown.
Upgrade(::upgrade::Pending),
Upgrade(crate::upgrade::Pending),
}
/// A separate module to encapsulate the invariants of the DecodedLength type.
@@ -83,12 +83,12 @@ mod body_length {
}
/// Checks the `u64` is within the maximum allowed for content-length.
pub(crate) fn checked_new(len: u64) -> Result<Self, ::error::Parse> {
pub(crate) fn checked_new(len: u64) -> Result<Self, crate::error::Parse> {
if len <= MAX_LEN {
Ok(DecodedLength(len))
} else {
warn!("content-length bigger than maximum: {} > {}", len, MAX_LEN);
Err(::error::Parse::TooLarge)
Err(crate::error::Parse::TooLarge)
}
}
}