Update crate to Rust 2018 (#383)

Jakub Beránek
2019-07-23 19:18:43 +02:00
committed by Sean McArthur
parent b3351e675b
commit db6b841e67
68 changed files with 478 additions and 660 deletions


@@ -15,6 +15,7 @@ readme = "README.md"
keywords = ["http", "async", "non-blocking"]
categories = ["asynchronous", "web-programming", "network-programming"]
exclude = ["fixtures/**", "ci/**"]
edition = "2018"
publish = false


@@ -1,13 +1,3 @@
extern crate env_logger;
extern crate futures;
extern crate h2;
extern crate http;
extern crate rustls;
extern crate tokio;
extern crate tokio_rustls;
extern crate webpki;
extern crate webpki_roots;
use h2::client;
use futures::*;
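The hunk above deletes the example's `extern crate` declarations. As a minimal standalone sketch (not a file from this repository), the 2018 edition resolves dependencies straight from Cargo.toml, so the remaining `use` lines are all that is needed:

#![allow(unused_imports)] // sketch only; the imports are not exercised

// With `edition = "2018"` set in Cargo.toml, crates listed as
// dependencies are importable directly; no `extern crate h2;` or
// similar declaration is required.
use futures::Future;
use h2::client;

fn main() {
    // Nothing to run; the point is that the imports above resolve
    // without any `extern crate` declarations in this file.
}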


@@ -1,9 +1,3 @@
extern crate env_logger;
extern crate futures;
extern crate h2;
extern crate http;
extern crate tokio;
use h2::client;
use h2::RecvStream;


@@ -1,10 +1,3 @@
extern crate bytes;
extern crate env_logger;
extern crate futures;
extern crate h2;
extern crate http;
extern crate tokio;
use h2::server;
use bytes::*;


@@ -65,11 +65,6 @@
//! # Example
//!
//! ```rust
//! extern crate futures;
//! extern crate h2;
//! extern crate http;
//! extern crate tokio;
//!
//! use h2::client;
//!
//! use futures::*;
@@ -156,13 +151,13 @@
//! [`Builder`]: struct.Builder.html
//! [`Error`]: ../struct.Error.html
use {SendStream, RecvStream, ReleaseCapacity, PingPong};
use codec::{Codec, RecvError, SendError, UserError};
use frame::{Headers, Pseudo, Reason, Settings, StreamId};
use proto;
use crate::{SendStream, RecvStream, ReleaseCapacity, PingPong};
use crate::codec::{Codec, RecvError, SendError, UserError};
use crate::frame::{Headers, Pseudo, Reason, Settings, StreamId};
use crate::proto;
use bytes::{Bytes, IntoBuf};
use futures::{Async, Future, Poll, Stream};
use futures::{Async, Future, Poll, Stream, try_ready};
use http::{uri, HeaderMap, Request, Response, Method, Version};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_io::io::WriteAll;
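The import hunk above switches the crate's internal paths to the `crate::` prefix. A minimal standalone sketch of the pattern, which assumes nothing about this crate's real module layout:

// In Rust 2018, items from the crate's own modules are imported through
// `crate::`, while external crates keep their bare names.
mod codec {
    #[derive(Debug)]
    pub struct Codec;
}

mod client {
    use crate::codec::Codec; // was `use codec::Codec;` in the 2015 edition

    pub fn new_codec() -> Codec {
        Codec
    }
}

fn main() {
    println!("{:?}", client::new_codec());
}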
@@ -251,10 +246,6 @@ pub struct ReadySendRequest<B: IntoBuf> {
/// # Examples
///
/// ```
/// # extern crate bytes;
/// # extern crate futures;
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use futures::{Future, Stream};
/// # use futures::future::Executor;
/// # use tokio_io::*;
@@ -344,8 +335,6 @@ pub struct PushPromises {
/// # Examples
///
/// ```
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use tokio_io::*;
/// # use h2::client::*;
/// #
@@ -408,7 +397,7 @@ where
/// See [module] level docs for more details.
///
/// [module]: index.html
pub fn poll_ready(&mut self) -> Poll<(), ::Error> {
pub fn poll_ready(&mut self) -> Poll<(), crate::Error> {
try_ready!(self.inner.poll_pending_open(self.pending.as_ref()));
self.pending = None;
Ok(().into())
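The return type changes from `::Error` to `crate::Error` because a leading `::` refers to an external crate under the 2018 path rules. A minimal standalone sketch; the `Error` type and function here are illustrative, not this crate's:

#[derive(Debug)]
pub struct Error;

pub fn poll_ready() -> Result<(), crate::Error> {
    // A leading `::` still works for external crates such as `std`,
    // but crate-local items must now be reached via `crate::`.
    let _ = ::std::mem::size_of::<crate::Error>();
    Ok(())
}

fn main() {
    poll_ready().unwrap();
}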
@@ -426,9 +415,6 @@ where
/// # Examples
///
/// ```rust
/// # extern crate futures;
/// # extern crate h2;
/// # extern crate http;
/// # use futures::*;
/// # use h2::client::*;
/// # use http::*;
@@ -493,9 +479,6 @@ where
/// Sending a request with no body
///
/// ```rust
/// # extern crate futures;
/// # extern crate h2;
/// # extern crate http;
/// # use futures::*;
/// # use h2::client::*;
/// # use http::*;
@@ -529,9 +512,6 @@ where
/// Sending a request with a body and trailers
///
/// ```rust
/// # extern crate futures;
/// # extern crate h2;
/// # extern crate http;
/// # use futures::*;
/// # use h2::client::*;
/// # use http::*;
@@ -586,7 +566,7 @@ where
&mut self,
request: Request<()>,
end_of_stream: bool,
) -> Result<(ResponseFuture, SendStream<B>), ::Error> {
) -> Result<(ResponseFuture, SendStream<B>), crate::Error> {
self.inner
.send_request(request, end_of_stream, self.pending.as_ref())
.map_err(Into::into)
@@ -658,7 +638,7 @@ where B: IntoBuf,
B::Buf: 'static,
{
type Item = SendRequest<B>;
type Error = ::Error;
type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.inner {
@@ -683,8 +663,6 @@ impl Builder {
/// # Examples
///
/// ```
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use tokio_io::*;
/// # use h2::client::*;
/// #
@@ -726,8 +704,6 @@ impl Builder {
/// # Examples
///
/// ```
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use tokio_io::*;
/// # use h2::client::*;
/// #
@@ -762,8 +738,6 @@ impl Builder {
/// # Examples
///
/// ```
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use tokio_io::*;
/// # use h2::client::*;
/// #
@@ -797,8 +771,6 @@ impl Builder {
/// # Examples
///
/// ```
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use tokio_io::*;
/// # use h2::client::*;
/// #
@@ -838,8 +810,6 @@ impl Builder {
/// # Examples
///
/// ```
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use tokio_io::*;
/// # use h2::client::*;
/// #
@@ -888,8 +858,6 @@ impl Builder {
/// # Examples
///
/// ```
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use tokio_io::*;
/// # use h2::client::*;
/// #
@@ -930,8 +898,6 @@ impl Builder {
/// # Examples
///
/// ```
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use tokio_io::*;
/// # use h2::client::*;
/// #
@@ -976,8 +942,6 @@ impl Builder {
/// # Examples
///
/// ```
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use tokio_io::*;
/// # use h2::client::*;
/// #
@@ -1022,8 +986,6 @@ impl Builder {
/// # Examples
///
/// ```
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use tokio_io::*;
/// # use h2::client::*;
/// # use std::time::Duration;
@@ -1061,8 +1023,6 @@ impl Builder {
/// # Examples
///
/// ```
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use tokio_io::*;
/// # use h2::client::*;
/// # use std::time::Duration;
@@ -1118,8 +1078,6 @@ impl Builder {
/// Basic usage:
///
/// ```
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use tokio_io::*;
/// # use h2::client::*;
/// #
@@ -1140,8 +1098,6 @@ impl Builder {
/// type will be `&'static [u8]`.
///
/// ```
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use tokio_io::*;
/// # use h2::client::*;
/// #
@@ -1193,9 +1149,6 @@ impl Default for Builder {
/// # Examples
///
/// ```
/// # extern crate futures;
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use futures::*;
/// # use tokio_io::*;
/// # use h2::client;
@@ -1231,7 +1184,7 @@ where
fn handshake2(io: T, builder: Builder) -> Handshake<T, B> {
use tokio_io::io;
debug!("binding client connection");
log::debug!("binding client connection");
let msg: &'static [u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
let handshake = io::write_all(io, msg);
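With `#[macro_use] extern crate log;` removed from lib.rs, the logging macros are invoked through their `log::` path, as in the line above. A minimal standalone sketch, assuming `log` and `env_logger` as dependencies:

fn main() {
    // Initialize a logger so the messages are visible (env_logger is an
    // assumption of this sketch, not something this commit adds).
    env_logger::init();

    // Path-qualified macro calls work in Rust 2018 without `#[macro_use]`.
    log::debug!("binding client connection");
    log::trace!("poll; bytes={}B", 64);
}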
@@ -1283,9 +1236,9 @@ where
B: IntoBuf,
{
type Item = ();
type Error = ::Error;
type Error = crate::Error;
fn poll(&mut self) -> Poll<(), ::Error> {
fn poll(&mut self) -> Poll<(), crate::Error> {
self.inner.maybe_close_connection_if_no_streams();
self.inner.poll().map_err(Into::into)
}
@@ -1312,15 +1265,15 @@ where
B::Buf: 'static,
{
type Item = (SendRequest<B>, Connection<T, B>);
type Error = ::Error;
type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let res = self.inner.poll()
.map_err(::Error::from);
.map_err(crate::Error::from);
let (io, _) = try_ready!(res);
debug!("client connection bound");
log::debug!("client connection bound");
// Create the codec
let mut codec = Codec::new(io);
@@ -1375,7 +1328,7 @@ where
impl Future for ResponseFuture {
type Item = Response<RecvStream>;
type Error = ::Error;
type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let (parts, _) = try_ready!(self.inner.poll_response()).into_parts();
@@ -1391,8 +1344,8 @@ impl ResponseFuture {
/// # Panics
///
/// If the lock on the stream store has been poisoned.
pub fn stream_id(&self) -> ::StreamId {
::StreamId::from_internal(self.inner.stream_id())
pub fn stream_id(&self) -> crate::StreamId {
crate::StreamId::from_internal(self.inner.stream_id())
}
/// Returns a stream of PushPromises
///
@@ -1413,7 +1366,7 @@ impl ResponseFuture {
impl Stream for PushPromises {
type Item = PushPromise;
type Error = ::Error;
type Error = crate::Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
match try_ready!(self.inner.poll_pushed()) {
@@ -1454,7 +1407,7 @@ impl PushPromise {
impl Future for PushedResponseFuture {
type Item = Response<RecvStream>;
type Error = ::Error;
type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
self.inner.poll()
@@ -1467,7 +1420,7 @@ impl PushedResponseFuture {
/// # Panics
///
/// If the lock on the stream store has been poisoned.
pub fn stream_id(&self) -> ::StreamId {
pub fn stream_id(&self) -> crate::StreamId {
self.inner.stream_id()
}
}
@@ -1541,7 +1494,7 @@ impl Peer {
impl proto::Peer for Peer {
type Poll = Response<()>;
fn dyn() -> proto::DynPeer {
fn r#dyn() -> proto::DynPeer {
proto::DynPeer::Client
}
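`dyn` became a reserved keyword in the 2018 edition, so the method above is renamed to the raw identifier `r#dyn`. A minimal standalone sketch of the syntax:

// A function (or method) that was legally called `dyn` in Rust 2015 must
// be written and invoked as `r#dyn` in Rust 2018.
fn r#dyn() -> &'static str {
    "still callable, just spelled r#dyn"
}

fn main() {
    println!("{}", r#dyn());
}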


@@ -1,4 +1,4 @@
use frame::{Reason, StreamId};
use crate::frame::{Reason, StreamId};
use std::{error, fmt, io};


@@ -1,8 +1,8 @@
use codec::RecvError;
use frame::{self, Frame, Kind, Reason};
use frame::{DEFAULT_MAX_FRAME_SIZE, DEFAULT_SETTINGS_HEADER_TABLE_SIZE, MAX_MAX_FRAME_SIZE};
use crate::codec::RecvError;
use crate::frame::{self, Frame, Kind, Reason};
use crate::frame::{DEFAULT_MAX_FRAME_SIZE, DEFAULT_SETTINGS_HEADER_TABLE_SIZE, MAX_MAX_FRAME_SIZE};
use hpack;
use crate::hpack;
use futures::*;
@@ -57,7 +57,7 @@ impl<T> FramedRead<T> {
fn decode_frame(&mut self, mut bytes: BytesMut) -> Result<Option<Frame>, RecvError> {
use self::RecvError::*;
trace!("decoding frame from {}B", bytes.len());
log::trace!("decoding frame from {}B", bytes.len());
// Parse the head
let head = frame::Head::parse(&bytes);
@@ -69,7 +69,7 @@ impl<T> FramedRead<T> {
let kind = head.kind();
trace!(" -> kind={:?}", kind);
log::trace!(" -> kind={:?}", kind);
macro_rules! header_block {
($frame:ident, $head:ident, $bytes:ident) => ({
@@ -119,7 +119,7 @@ impl<T> FramedRead<T> {
if is_end_headers {
frame.into()
} else {
trace!("loaded partial header block");
log::trace!("loaded partial header block");
// Defer returning the frame
self.partial = Some(Partial {
frame: Continuable::$frame(frame),
@@ -330,15 +330,15 @@ where
fn poll(&mut self) -> Poll<Option<Frame>, Self::Error> {
loop {
trace!("poll");
log::trace!("poll");
let bytes = match try_ready!(self.inner.poll().map_err(map_err)) {
Some(bytes) => bytes,
None => return Ok(Async::Ready(None)),
};
trace!("poll; bytes={}B", bytes.len());
log::trace!("poll; bytes={}B", bytes.len());
if let Some(frame) = self.decode_frame(bytes)? {
debug!("received; frame={:?}", frame);
log::debug!("received; frame={:?}", frame);
return Ok(Async::Ready(Some(frame)));
}
}


@@ -1,11 +1,11 @@
use codec::UserError;
use codec::UserError::*;
use frame::{self, Frame, FrameSize};
use hpack;
use crate::codec::UserError;
use crate::codec::UserError::*;
use crate::frame::{self, Frame, FrameSize};
use crate::hpack;
use bytes::{Buf, BufMut, BytesMut};
use futures::*;
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_io::{AsyncRead, AsyncWrite, try_nb};
use std::io::{self, Cursor};
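The hunk above imports the `try_nb` macro from `tokio_io` with a plain `use`, just as `try_ready` is imported from `futures` elsewhere in this commit: in Rust 2018, `#[macro_export]` macros can be brought into scope like any other item. A minimal standalone sketch against the futures 0.1 API used here; the `Answer`/`AddOne` types are invented for illustration:

use futures::{try_ready, Async, Future, Poll};

struct Answer;

impl Future for Answer {
    type Item = u32;
    type Error = ();

    fn poll(&mut self) -> Poll<u32, ()> {
        Ok(Async::Ready(42))
    }
}

struct AddOne(Answer);

impl Future for AddOne {
    type Item = u32;
    type Error = ();

    fn poll(&mut self) -> Poll<u32, ()> {
        // `try_ready!` is in scope via the `use` above; no
        // `#[macro_use] extern crate futures;` is needed.
        let value = try_ready!(self.0.poll());
        Ok(Async::Ready(value + 1))
    }
}

fn main() {
    assert_eq!(AddOne(Answer).wait(), Ok(43));
}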
@@ -94,7 +94,7 @@ where
// Ensure that we have enough capacity to accept the write.
assert!(self.has_capacity());
debug!("send; frame={:?}", item);
log::debug!("send; frame={:?}", item);
match item {
Frame::Data(mut v) => {
@@ -136,31 +136,31 @@ where
},
Frame::Settings(v) => {
v.encode(self.buf.get_mut());
trace!("encoded settings; rem={:?}", self.buf.remaining());
log::trace!("encoded settings; rem={:?}", self.buf.remaining());
},
Frame::GoAway(v) => {
v.encode(self.buf.get_mut());
trace!("encoded go_away; rem={:?}", self.buf.remaining());
log::trace!("encoded go_away; rem={:?}", self.buf.remaining());
},
Frame::Ping(v) => {
v.encode(self.buf.get_mut());
trace!("encoded ping; rem={:?}", self.buf.remaining());
log::trace!("encoded ping; rem={:?}", self.buf.remaining());
},
Frame::WindowUpdate(v) => {
v.encode(self.buf.get_mut());
trace!("encoded window_update; rem={:?}", self.buf.remaining());
log::trace!("encoded window_update; rem={:?}", self.buf.remaining());
},
Frame::Priority(_) => {
/*
v.encode(self.buf.get_mut());
trace!("encoded priority; rem={:?}", self.buf.remaining());
log::trace!("encoded priority; rem={:?}", self.buf.remaining());
*/
unimplemented!();
},
Frame::Reset(v) => {
v.encode(self.buf.get_mut());
trace!("encoded reset; rem={:?}", self.buf.remaining());
log::trace!("encoded reset; rem={:?}", self.buf.remaining());
},
}
@@ -169,18 +169,18 @@ where
/// Flush buffered data to the wire
pub fn flush(&mut self) -> Poll<(), io::Error> {
trace!("flush");
log::trace!("flush");
loop {
while !self.is_empty() {
match self.next {
Some(Next::Data(ref mut frame)) => {
trace!(" -> queued data frame");
log::trace!(" -> queued data frame");
let mut buf = Buf::by_ref(&mut self.buf).chain(frame.payload_mut());
try_ready!(self.inner.write_buf(&mut buf));
},
_ => {
trace!(" -> not a queued data frame");
log::trace!(" -> not a queued data frame");
try_ready!(self.inner.write_buf(&mut self.buf));
},
}
@@ -220,7 +220,7 @@ where
}
}
trace!("flushing buffer");
log::trace!("flushing buffer");
// Flush the upstream
try_nb!(self.inner.flush());


@@ -10,7 +10,7 @@ pub use self::error::{RecvError, SendError, UserError};
use self::framed_read::FramedRead;
use self::framed_write::FramedWrite;
use frame::{self, Data, Frame};
use crate::frame::{self, Data, Frame};
use futures::*;


@@ -1,9 +1,9 @@
use codec::{SendError, UserError};
use proto;
use crate::codec::{SendError, UserError};
use crate::proto;
use std::{error, fmt, io};
pub use frame::Reason;
pub use crate::frame::Reason;
/// Represents HTTP/2.0 operation errors.
///
@@ -77,7 +77,7 @@ impl Error {
impl From<proto::Error> for Error {
fn from(src: proto::Error) -> Error {
use proto::Error::*;
use crate::proto::Error::*;
Error {
kind: match src {


@@ -1,5 +1,5 @@
use bytes::{Buf, BufMut, Bytes};
use frame::{util, Error, Frame, Head, Kind, StreamId};
use crate::frame::{util, Error, Frame, Head, Kind, StreamId};
use std::fmt;


@@ -1,4 +1,4 @@
use frame::{self, Error, Head, Kind, Reason, StreamId};
use crate::frame::{self, Error, Head, Kind, Reason, StreamId};
use bytes::{BufMut};
@@ -39,7 +39,7 @@ impl GoAway {
}
pub fn encode<B: BufMut>(&self, dst: &mut B) {
trace!("encoding GO_AWAY; code={:?}", self.error_code);
log::trace!("encoding GO_AWAY; code={:?}", self.error_code);
let head = Head::new(Kind::GoAway, 0, StreamId::zero());
head.encode(8, dst);
dst.put_u32_be(self.last_stream_id.into());


@@ -1,6 +1,6 @@
use super::{util, StreamDependency, StreamId};
use frame::{Error, Frame, Head, Kind};
use hpack;
use crate::frame::{Error, Frame, Head, Kind};
use crate::hpack;
use http::{uri, HeaderMap, Method, StatusCode, Uri};
use http::header::{self, HeaderName, HeaderValue};
@@ -153,7 +153,7 @@ impl Headers {
let flags = HeadersFlag(head.flag());
let mut pad = 0;
trace!("loading headers; flags={:?}", flags);
log::trace!("loading headers; flags={:?}", flags);
// Read the padding length
if flags.is_padded() {
@@ -583,7 +583,7 @@ impl Iterator for Iter {
type Item = hpack::Header<Option<HeaderName>>;
fn next(&mut self) -> Option<Self::Item> {
use hpack::Header::*;
use crate::hpack::Header::*;
if let Some(ref mut pseudo) = self.pseudo {
if let Some(method) = pseudo.method.take() {
@@ -736,10 +736,10 @@ impl HeaderBlock {
macro_rules! set_pseudo {
($field:ident, $val:expr) => {{
if reg {
trace!("load_hpack; header malformed -- pseudo not at head of block");
log::trace!("load_hpack; header malformed -- pseudo not at head of block");
malformed = true;
} else if self.pseudo.$field.is_some() {
trace!("load_hpack; header malformed -- repeated pseudo");
log::trace!("load_hpack; header malformed -- repeated pseudo");
malformed = true;
} else {
let __val = $val;
@@ -747,7 +747,7 @@ impl HeaderBlock {
if headers_size < max_header_list_size {
self.pseudo.$field = Some(__val);
} else if !self.is_over_size {
trace!("load_hpack; header list size over max");
log::trace!("load_hpack; header list size over max");
self.is_over_size = true;
}
}
@@ -761,7 +761,7 @@ impl HeaderBlock {
// the hpack state is connection level. In order to maintain correct
// state for other streams, the hpack decoding process must complete.
let res = decoder.decode(&mut cursor, |header| {
use hpack::Header::*;
use crate::hpack::Header::*;
match header {
Field {
@@ -777,10 +777,10 @@ impl HeaderBlock {
|| name == "keep-alive"
|| name == "proxy-connection"
{
trace!("load_hpack; connection level header");
log::trace!("load_hpack; connection level header");
malformed = true;
} else if name == header::TE && value != "trailers" {
trace!("load_hpack; TE header not set to trailers; val={:?}", value);
log::trace!("load_hpack; TE header not set to trailers; val={:?}", value);
malformed = true;
} else {
reg = true;
@@ -789,7 +789,7 @@ impl HeaderBlock {
if headers_size < max_header_list_size {
self.fields.append(name, value);
} else if !self.is_over_size {
trace!("load_hpack; header list size over max");
log::trace!("load_hpack; header list size over max");
self.is_over_size = true;
}
}
@@ -803,12 +803,12 @@ impl HeaderBlock {
});
if let Err(e) = res {
trace!("hpack decoding error; err={:?}", e);
log::trace!("hpack decoding error; err={:?}", e);
return Err(e.into());
}
if malformed {
trace!("malformed message");
log::trace!("malformed message");
return Err(Error::MalformedMessage.into());
}


@@ -1,4 +1,4 @@
use hpack;
use crate::hpack;
use bytes::Bytes;


@@ -1,5 +1,5 @@
use bytes::{Buf, BufMut, IntoBuf};
use frame::{Error, Frame, Head, Kind, StreamId};
use crate::frame::{Error, Frame, Head, Kind, StreamId};
const ACK_FLAG: u8 = 0x1;
@@ -58,7 +58,7 @@ impl Ping {
/// Builds a `Ping` frame from a raw frame.
pub fn load(head: Head, bytes: &[u8]) -> Result<Ping, Error> {
debug_assert_eq!(head.kind(), ::frame::Kind::Ping);
debug_assert_eq!(head.kind(), crate::frame::Kind::Ping);
// PING frames are not associated with any individual stream. If a PING
// frame is received with a stream identifier field value other than
@@ -92,7 +92,7 @@ impl Ping {
pub fn encode<B: BufMut>(&self, dst: &mut B) {
let sz = self.payload.len();
trace!("encoding PING; ack={} len={}", self.ack, sz);
log::trace!("encoding PING; ack={} len={}", self.ack, sz);
let flags = if self.ack { ACK_FLAG } else { 0 };
let head = Head::new(Kind::Ping, flags, StreamId::zero());


@@ -1,4 +1,4 @@
use frame::*;
use crate::frame::*;
#[derive(Debug, Eq, PartialEq)]
pub struct Priority {


@@ -1,4 +1,4 @@
use frame::{self, Error, Head, Kind, Reason, StreamId};
use crate::frame::{self, Error, Head, Kind, Reason, StreamId};
use bytes::{BufMut};
@@ -38,7 +38,7 @@ impl Reset {
}
pub fn encode<B: BufMut>(&self, dst: &mut B) {
trace!(
log::trace!(
"encoding RESET; id={:?} code={:?}",
self.stream_id,
self.error_code


@@ -1,7 +1,7 @@
use std::fmt;
use bytes::{BufMut, BytesMut};
use frame::{util, Error, Frame, FrameSize, Head, Kind, StreamId};
use crate::frame::{util, Error, Frame, FrameSize, Head, Kind, StreamId};
#[derive(Clone, Default, Eq, PartialEq)]
pub struct Settings {
@@ -110,7 +110,7 @@ impl Settings {
pub fn load(head: Head, payload: &[u8]) -> Result<Settings, Error> {
use self::Setting::*;
debug_assert_eq!(head.kind(), ::frame::Kind::Settings);
debug_assert_eq!(head.kind(), crate::frame::Kind::Settings);
if !head.stream_id().is_zero() {
return Err(Error::InvalidStreamId);
@@ -131,7 +131,7 @@ impl Settings {
// Ensure the payload length is correct, each setting is 6 bytes long.
if payload.len() % 6 != 0 {
debug!("invalid settings payload length; len={:?}", payload.len());
log::debug!("invalid settings payload length; len={:?}", payload.len());
return Err(Error::InvalidPayloadAckSettings);
}
@@ -187,13 +187,13 @@ impl Settings {
let head = Head::new(Kind::Settings, self.flags.into(), StreamId::zero());
let payload_len = self.payload_len();
trace!("encoding SETTINGS; len={}", payload_len);
log::trace!("encoding SETTINGS; len={}", payload_len);
head.encode(payload_len, dst);
// Encode the settings
self.for_each(|setting| {
trace!("encoding setting; val={:?}", setting);
log::trace!("encoding setting; val={:?}", setting);
setting.encode(dst)
});
}


@@ -1,4 +1,4 @@
use frame::{self, Error, Head, Kind, StreamId};
use crate::frame::{self, Error, Head, Kind, StreamId};
use bytes::{BufMut};
@@ -28,7 +28,7 @@ impl WindowUpdate {
/// Builds a `WindowUpdate` frame from a raw frame.
pub fn load(head: Head, payload: &[u8]) -> Result<WindowUpdate, Error> {
debug_assert_eq!(head.kind(), ::frame::Kind::WindowUpdate);
debug_assert_eq!(head.kind(), crate::frame::Kind::WindowUpdate);
if payload.len() != 4 {
return Err(Error::BadFrameSize);
}
@@ -48,7 +48,7 @@ impl WindowUpdate {
}
pub fn encode<B: BufMut>(&self, dst: &mut B) {
trace!("encoding WINDOW_UPDATE; id={:?}", self.stream_id);
log::trace!("encoding WINDOW_UPDATE; id={:?}", self.stream_id);
let head = Head::new(Kind::WindowUpdate, 0, self.stream_id);
head.encode(4, dst);
dst.put_u32_be(self.size_increment);


@@ -1,5 +1,5 @@
use super::{huffman, Header};
use frame;
use crate::frame;
use bytes::{Buf, Bytes, BytesMut};
use http::header;
@@ -180,7 +180,7 @@ impl Decoder {
self.last_max_update = size;
}
trace!("decode");
log::trace!("decode");
while let Some(ty) = peek_u8(src) {
// At this point we are always at the beginning of the next block
@@ -188,14 +188,14 @@ impl Decoder {
// determined from the first byte.
match Representation::load(ty)? {
Indexed => {
trace!(" Indexed; rem={:?}", src.remaining());
log::trace!(" Indexed; rem={:?}", src.remaining());
can_resize = false;
let entry = self.decode_indexed(src)?;
consume(src);
f(entry);
},
LiteralWithIndexing => {
trace!(" LiteralWithIndexing; rem={:?}", src.remaining());
log::trace!(" LiteralWithIndexing; rem={:?}", src.remaining());
can_resize = false;
let entry = self.decode_literal(src, true)?;
@@ -206,14 +206,14 @@ impl Decoder {
f(entry);
},
LiteralWithoutIndexing => {
trace!(" LiteralWithoutIndexing; rem={:?}", src.remaining());
log::trace!(" LiteralWithoutIndexing; rem={:?}", src.remaining());
can_resize = false;
let entry = self.decode_literal(src, false)?;
consume(src);
f(entry);
},
LiteralNeverIndexed => {
trace!(" LiteralNeverIndexed; rem={:?}", src.remaining());
log::trace!(" LiteralNeverIndexed; rem={:?}", src.remaining());
can_resize = false;
let entry = self.decode_literal(src, false)?;
consume(src);
@@ -223,7 +223,7 @@ impl Decoder {
f(entry);
},
SizeUpdate => {
trace!(" SizeUpdate; rem={:?}", src.remaining());
log::trace!(" SizeUpdate; rem={:?}", src.remaining());
if !can_resize {
return Err(DecoderError::InvalidMaxDynamicSize);
}
@@ -245,7 +245,7 @@ impl Decoder {
return Err(DecoderError::InvalidMaxDynamicSize);
}
debug!(
log::debug!(
"Decoder changed max table size from {} to {}",
self.table.size(),
new_size
@@ -299,7 +299,7 @@ impl Decoder {
let len = decode_int(buf, 7)?;
if len > buf.remaining() {
trace!(
log::trace!(
"decode_string underflow; len={}; remaining={}",
len,
buf.remaining()
@@ -788,7 +788,7 @@ fn from_static(s: &'static str) -> String<Bytes> {
#[cfg(test)]
mod test {
use super::*;
use hpack::Header;
use crate::hpack::Header;
#[test]
fn test_peek_u8() {


@@ -413,7 +413,7 @@ fn encode_int_one_byte(value: usize, prefix_bits: usize) -> bool {
#[cfg(test)]
mod test {
use super::*;
use hpack::Header;
use crate::hpack::Header;
use http::*;
#[test]


@@ -1,7 +1,7 @@
mod table;
use self::table::{DECODE_TABLE, ENCODE_TABLE};
use hpack::{DecoderError, EncoderError};
use crate::hpack::{DecoderError, EncoderError};
use bytes::{BufMut, BytesMut};


@@ -1,12 +1,8 @@
extern crate bytes;
extern crate hex;
extern crate serde_json;
use crate::hpack::{Decoder, Encoder, Header};
use hpack::{Decoder, Encoder, Header};
use self::bytes::BytesMut;
use self::hex::FromHex;
use self::serde_json::Value;
use bytes::BytesMut;
use hex::FromHex;
use serde_json::Value;
use std::fs::File;
use std::io::Cursor;


@@ -1,15 +1,10 @@
extern crate bytes;
extern crate env_logger;
extern crate quickcheck;
extern crate rand;
use hpack::{Decoder, Encode, Encoder, Header};
use crate::hpack::{Decoder, Encode, Encoder, Header};
use http::header::{HeaderName, HeaderValue};
use self::bytes::{Bytes, BytesMut};
use self::quickcheck::{Arbitrary, Gen, QuickCheck, TestResult};
use self::rand::{Rng, SeedableRng, StdRng};
use bytes::{Bytes, BytesMut};
use quickcheck::{Arbitrary, Gen, QuickCheck, TestResult};
use rand::{Rng, SeedableRng, StdRng};
use std::io::Cursor;
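The test module above used to declare its own `extern crate` lines and reach them through `self::`, e.g. `use self::bytes::BytesMut;`; in 2018 the dependency is named directly from any module. A minimal standalone sketch, where the `fixture` module is invented for illustration and the bytes 0.4 API is assumed:

mod fixture {
    use bytes::{BufMut, BytesMut}; // was `use self::bytes::BytesMut;`

    pub fn buffer() -> BytesMut {
        let mut buf = BytesMut::with_capacity(16);
        buf.put_slice(b"fixture");
        buf
    }
}

fn main() {
    println!("{:?}", fixture::buffer());
}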


@@ -88,35 +88,12 @@
#![deny(missing_debug_implementations, missing_docs)]
#![cfg_attr(test, deny(warnings))]
#[macro_use]
extern crate futures;
#[macro_use]
extern crate tokio_io;
// HTTP types
extern crate http;
// Buffer utilities
extern crate bytes;
// Hash function used for HPACK encoding and tracking stream states.
extern crate fnv;
extern crate byteorder;
extern crate slab;
#[macro_use]
extern crate log;
extern crate string;
extern crate indexmap;
macro_rules! proto_err {
(conn: $($msg:tt)+) => {
debug!("connection error PROTOCOL_ERROR -- {};", format_args!($($msg)+))
log::debug!("connection error PROTOCOL_ERROR -- {};", format_args!($($msg)+))
};
(stream: $($msg:tt)+) => {
debug!("stream error PROTOCOL_ERROR -- {};", format_args!($($msg)+))
log::debug!("stream error PROTOCOL_ERROR -- {};", format_args!($($msg)+))
};
}
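Because the `#[macro_use] extern crate log;` block is deleted above, the crate-local `proto_err!` macro now expands to path-qualified `log::debug!` calls, so its call sites need no macros in scope. A minimal standalone sketch of that pattern; the macro name and the `env_logger` dependency are assumptions of the sketch:

// A local macro that logs through the `log::` path; callers need neither
// `#[macro_use]` nor their own imports of the logging macros.
macro_rules! proto_err_sketch {
    ($($msg:tt)+) => {
        log::debug!("connection error PROTOCOL_ERROR -- {};", format_args!($($msg)+))
    };
}

fn main() {
    env_logger::init();
    proto_err_sketch!("unexpected frame on stream {:?}", 1);
}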
@@ -137,8 +114,8 @@ pub mod client;
pub mod server;
mod share;
pub use error::{Error, Reason};
pub use share::{SendStream, StreamId, RecvStream, ReleaseCapacity, PingPong, Ping, Pong};
pub use crate::error::{Error, Reason};
pub use crate::share::{SendStream, StreamId, RecvStream, ReleaseCapacity, PingPong, Ping, Pong};
#[cfg(feature = "unstable")]
pub use codec::{Codec, RecvError, SendError, UserError};


@@ -1,12 +1,12 @@
use {client, frame, proto, server};
use codec::RecvError;
use frame::{Reason, StreamId};
use crate::{client, frame, proto, server};
use crate::codec::RecvError;
use crate::frame::{Reason, StreamId};
use frame::DEFAULT_INITIAL_WINDOW_SIZE;
use proto::*;
use crate::frame::DEFAULT_INITIAL_WINDOW_SIZE;
use crate::proto::*;
use bytes::{Bytes, IntoBuf};
use futures::Stream;
use futures::{Stream, try_ready};
use tokio_io::{AsyncRead, AsyncWrite};
use std::marker::PhantomData;
@@ -193,7 +193,7 @@ where
/// Advances the internal state of the connection.
pub fn poll(&mut self) -> Poll<(), proto::Error> {
use codec::RecvError::*;
use crate::codec::RecvError::*;
loop {
// TODO: probably clean up this glob of code
@@ -223,13 +223,13 @@ where
// error. This is handled by setting a GOAWAY frame followed by
// terminating the connection.
Err(Connection(e)) => {
debug!("Connection::poll; connection error={:?}", e);
log::debug!("Connection::poll; connection error={:?}", e);
// We may have already sent a GOAWAY for this error,
// if so, don't send another, just flush and close up.
if let Some(reason) = self.go_away.going_away_reason() {
if reason == e {
trace!(" -> already going away");
log::trace!(" -> already going away");
self.state = State::Closing(e);
continue;
}
@@ -246,7 +246,7 @@ where
id,
reason,
}) => {
trace!("stream error; id={:?}; reason={:?}", id, reason);
log::trace!("stream error; id={:?}; reason={:?}", id, reason);
self.streams.send_reset(id, reason);
},
// Attempting to read a frame resulted in an I/O error. All
@@ -254,7 +254,7 @@ where
//
// TODO: Are I/O errors recoverable?
Err(Io(e)) => {
debug!("Connection::poll; IO error={:?}", e);
log::debug!("Connection::poll; IO error={:?}", e);
let e = e.into();
// Reset all active streams
@@ -266,7 +266,7 @@ where
}
}
State::Closing(reason) => {
trace!("connection closing after flush");
log::trace!("connection closing after flush");
// Flush/shutdown the codec
try_ready!(self.codec.shutdown());
@@ -279,7 +279,7 @@ where
}
fn poll2(&mut self) -> Poll<(), RecvError> {
use frame::Frame::*;
use crate::frame::Frame::*;
// This happens outside of the loop to prevent needing to do a clock
// check and then comparison of the queue possibly multiple times a
@@ -309,27 +309,27 @@ where
match try_ready!(self.codec.poll()) {
Some(Headers(frame)) => {
trace!("recv HEADERS; frame={:?}", frame);
log::trace!("recv HEADERS; frame={:?}", frame);
self.streams.recv_headers(frame)?;
},
Some(Data(frame)) => {
trace!("recv DATA; frame={:?}", frame);
log::trace!("recv DATA; frame={:?}", frame);
self.streams.recv_data(frame)?;
},
Some(Reset(frame)) => {
trace!("recv RST_STREAM; frame={:?}", frame);
log::trace!("recv RST_STREAM; frame={:?}", frame);
self.streams.recv_reset(frame)?;
},
Some(PushPromise(frame)) => {
trace!("recv PUSH_PROMISE; frame={:?}", frame);
log::trace!("recv PUSH_PROMISE; frame={:?}", frame);
self.streams.recv_push_promise(frame)?;
},
Some(Settings(frame)) => {
trace!("recv SETTINGS; frame={:?}", frame);
log::trace!("recv SETTINGS; frame={:?}", frame);
self.settings.recv_settings(frame);
},
Some(GoAway(frame)) => {
trace!("recv GOAWAY; frame={:?}", frame);
log::trace!("recv GOAWAY; frame={:?}", frame);
// This should prevent starting new streams,
// but should allow continuing to process current streams
// until they are all EOS. Once they are, State should
@@ -338,7 +338,7 @@ where
self.error = Some(frame.reason());
},
Some(Ping(frame)) => {
trace!("recv PING; frame={:?}", frame);
log::trace!("recv PING; frame={:?}", frame);
let status = self.ping_pong.recv_ping(frame);
if status.is_shutdown() {
assert!(
@@ -351,15 +351,15 @@ where
}
},
Some(WindowUpdate(frame)) => {
trace!("recv WINDOW_UPDATE; frame={:?}", frame);
log::trace!("recv WINDOW_UPDATE; frame={:?}", frame);
self.streams.recv_window_update(frame)?;
},
Some(Priority(frame)) => {
trace!("recv PRIORITY; frame={:?}", frame);
log::trace!("recv PRIORITY; frame={:?}", frame);
// TODO: handle
},
None => {
trace!("codec closed");
log::trace!("codec closed");
self.streams.recv_eof(false)
.ok().expect("mutex poisoned");
return Ok(Async::Ready(()));


@@ -1,5 +1,5 @@
use codec::{RecvError, SendError};
use frame::Reason;
use crate::codec::{RecvError, SendError};
use crate::frame::Reason;
use std::io;


@@ -1,5 +1,5 @@
use codec::Codec;
use frame::{self, Reason, StreamId};
use crate::codec::Codec;
use crate::frame::{self, Reason, StreamId};
use bytes::Buf;
use futures::{Async, Poll};


@@ -13,13 +13,13 @@ pub(crate) use self::ping_pong::UserPings;
pub(crate) use self::streams::{StreamRef, OpaqueStreamRef, Streams};
pub(crate) use self::streams::{PollReset, Prioritized, Open};
use codec::Codec;
use crate::codec::Codec;
use self::go_away::GoAway;
use self::ping_pong::PingPong;
use self::settings::Settings;
use frame::{self, Frame};
use crate::frame::{self, Frame};
use futures::{task, Async, Poll};
use futures::task::Task;


@@ -1,7 +1,7 @@
use codec::RecvError;
use error::Reason;
use frame::{Pseudo, StreamId};
use proto::Open;
use crate::codec::RecvError;
use crate::error::Reason;
use crate::frame::{Pseudo, StreamId};
use crate::proto::Open;
use http::{HeaderMap, Request, Response};
@@ -12,7 +12,7 @@ pub(crate) trait Peer {
/// Message type polled from the transport
type Poll: fmt::Debug;
fn dyn() -> Dyn;
fn r#dyn() -> Dyn;
fn is_server() -> bool;
@@ -57,10 +57,10 @@ impl Dyn {
&self, pseudo: Pseudo, fields: HeaderMap, stream_id: StreamId
) -> Result<PollMessage, RecvError> {
if self.is_server() {
::server::Peer::convert_poll_message(pseudo, fields, stream_id)
crate::server::Peer::convert_poll_message(pseudo, fields, stream_id)
.map(PollMessage::Server)
} else {
::client::Peer::convert_poll_message(pseudo, fields, stream_id)
crate::client::Peer::convert_poll_message(pseudo, fields, stream_id)
.map(PollMessage::Client)
}
}


@@ -1,6 +1,6 @@
use codec::Codec;
use frame::Ping;
use proto::{self, PingPayload};
use crate::codec::Codec;
use crate::frame::Ping;
use crate::proto::{self, PingPayload};
use bytes::Buf;
use futures::{Async, Poll};
@@ -107,7 +107,7 @@ impl PingPong {
&Ping::SHUTDOWN,
"pending_ping should be for shutdown",
);
trace!("recv PING SHUTDOWN ack");
log::trace!("recv PING SHUTDOWN ack");
return ReceivedPing::Shutdown;
}
@@ -117,7 +117,7 @@ impl PingPong {
if let Some(ref users) = self.user_pings {
if ping.payload() == &Ping::USER && users.receive_pong() {
trace!("recv PING USER ack");
log::trace!("recv PING USER ack");
return ReceivedPing::Unknown;
}
}
@@ -125,7 +125,7 @@ impl PingPong {
// else we were acked a ping we didn't send?
// The spec doesn't require us to do anything about this,
// so for resiliency, just ignore it for now.
warn!("recv PING ack that we never sent: {:?}", ping);
log::warn!("recv PING ack that we never sent: {:?}", ping);
ReceivedPing::Unknown
} else {
// Save the ping's payload to be sent as an acknowledgement.


@@ -1,6 +1,6 @@
use codec::RecvError;
use frame;
use proto::*;
use crate::codec::RecvError;
use crate::frame;
use crate::proto::*;
#[derive(Debug)]
pub(crate) struct Settings {
@@ -19,7 +19,7 @@ impl Settings {
pub fn recv_settings(&mut self, frame: frame::Settings) {
if frame.is_ack() {
debug!("received remote settings ack");
log::debug!("received remote settings ack");
// TODO: handle acks
} else {
assert!(self.pending.is_none());
@@ -38,11 +38,11 @@ impl Settings {
C: Buf,
P: Peer,
{
trace!("send_pending_ack; pending={:?}", self.pending);
log::trace!("send_pending_ack; pending={:?}", self.pending);
if let Some(ref settings) = self.pending {
if !dst.poll_ready()?.is_ready() {
trace!("failed to send ACK");
log::trace!("failed to send ACK");
return Ok(Async::NotReady);
}
@@ -54,7 +54,7 @@ impl Settings {
.ok()
.expect("invalid settings frame");
trace!("ACK sent; applying settings");
log::trace!("ACK sent; applying settings");
if let Some(val) = settings.max_frame_size() {
dst.set_max_send_frame_size(val as usize);


@@ -133,7 +133,7 @@ impl Counts {
// TODO: move this to macro?
pub fn transition_after(&mut self, mut stream: store::Ptr, is_reset_counted: bool) {
trace!("transition_after; stream={:?}; state={:?}; is_closed={:?}; \
log::trace!("transition_after; stream={:?}; state={:?}; is_closed={:?}; \
pending_send_empty={:?}; buffered_send_data={}; \
num_recv={}; num_send={}",
stream.id,
@@ -153,7 +153,7 @@ impl Counts {
}
if stream.is_counted {
trace!("dec_num_streams; stream={:?}", stream.id);
log::trace!("dec_num_streams; stream={:?}", stream.id);
// Decrement the number of active streams.
self.dec_num_streams(&mut stream);
}


@@ -1,5 +1,5 @@
use frame::Reason;
use proto::{WindowSize, MAX_WINDOW_SIZE};
use crate::frame::Reason;
use crate::proto::{WindowSize, MAX_WINDOW_SIZE};
use std::fmt;
@@ -120,7 +120,7 @@ impl FlowControl {
return Err(Reason::FLOW_CONTROL_ERROR);
}
trace!(
log::trace!(
"inc_window; sz={}; old={}; new={}",
sz,
self.window_size,
@@ -136,7 +136,7 @@ impl FlowControl {
/// This is called after receiving a SETTINGS frame with a lower
/// INITIAL_WINDOW_SIZE value.
pub fn dec_window(&mut self, sz: WindowSize) {
trace!(
log::trace!(
"dec_window; sz={}; window={}, available={}",
sz,
self.window_size,
@@ -149,7 +149,7 @@ impl FlowControl {
/// Decrements the window reflecting data has actually been sent. The caller
/// must ensure that the window has capacity.
pub fn send_data(&mut self, sz: WindowSize) {
trace!(
log::trace!(
"send_data; sz={}; window={}; available={}",
sz,
self.window_size,


@@ -24,8 +24,8 @@ use self::state::State;
use self::store::Store;
use self::stream::Stream;
use frame::{StreamId, StreamIdOverflow};
use proto::*;
use crate::frame::{StreamId, StreamIdOverflow};
use crate::proto::*;
use bytes::Bytes;
use std::time::Duration;


@@ -1,12 +1,13 @@
use super::*;
use super::store::Resolve;
use frame::{Reason, StreamId};
use crate::frame::{Reason, StreamId};
use codec::UserError;
use codec::UserError::*;
use crate::codec::UserError;
use crate::codec::UserError::*;
use bytes::buf::Take;
use futures::try_ready;
use std::{cmp, fmt, mem};
use std::io;
@@ -85,7 +86,7 @@ impl Prioritize {
flow.assign_capacity(config.remote_init_window_sz);
trace!("Prioritize::new; flow={:?}", flow);
log::trace!("Prioritize::new; flow={:?}", flow);
Prioritize {
pending_send: store::Queue::new(),
@@ -113,7 +114,7 @@ impl Prioritize {
pub fn schedule_send(&mut self, stream: &mut store::Ptr, task: &mut Option<Task>) {
// If the stream is waiting to be opened, nothing more to do.
if !stream.is_pending_open {
trace!("schedule_send; {:?}", stream.id);
log::trace!("schedule_send; {:?}", stream.id);
// Queue the stream
self.pending_send.push(stream);
@@ -159,7 +160,7 @@ impl Prioritize {
// Update the buffered data counter
stream.buffered_send_data += sz;
trace!(
log::trace!(
"send_data; sz={}; buffered={}; requested={}",
sz,
stream.buffered_send_data,
@@ -180,7 +181,7 @@ impl Prioritize {
self.reserve_capacity(0, stream, counts);
}
trace!(
log::trace!(
"send_data (2); available={}; buffered={}",
stream.send_flow.available(),
stream.buffered_send_data
@@ -216,7 +217,7 @@ impl Prioritize {
capacity: WindowSize,
stream: &mut store::Ptr,
counts: &mut Counts) {
trace!(
log::trace!(
"reserve_capacity; stream={:?}; requested={:?}; effective={:?}; curr={:?}",
stream.id,
capacity,
@@ -268,7 +269,7 @@ impl Prioritize {
inc: WindowSize,
stream: &mut store::Ptr,
) -> Result<(), Reason> {
trace!(
log::trace!(
"recv_stream_window_update; stream={:?}; state={:?}; inc={}; flow={:?}",
stream.id,
stream.state,
@@ -328,7 +329,7 @@ impl Prioritize {
pub fn clear_pending_capacity(&mut self, store: &mut Store, counts: &mut Counts) {
while let Some(stream) = self.pending_capacity.pop(store) {
counts.transition(stream, |_, stream| {
trace!("clear_pending_capacity; stream={:?}", stream.id);
log::trace!("clear_pending_capacity; stream={:?}", stream.id);
})
}
}
@@ -341,7 +342,7 @@ impl Prioritize {
where
R: Resolve,
{
trace!("assign_connection_capacity; inc={}", inc);
log::trace!("assign_connection_capacity; inc={}", inc);
self.flow.assign_capacity(inc);
@@ -385,7 +386,7 @@ impl Prioritize {
stream.send_flow.window_size() - stream.send_flow.available().as_size(),
);
trace!(
log::trace!(
"try_assign_capacity; stream={:?}, requested={}; additional={}; buffered={}; window={}; conn={}",
stream.id,
total_requested,
@@ -418,7 +419,7 @@ impl Prioritize {
// TODO: Should prioritization factor into this?
let assign = cmp::min(conn_available, additional);
trace!(
log::trace!(
" assigning; stream={:?}, capacity={}",
stream.id,
assign,
@@ -431,7 +432,7 @@ impl Prioritize {
self.flow.claim_capacity(assign);
}
trace!(
log::trace!(
"try_assign_capacity(2); available={}; requested={}; buffered={}; has_unavailable={:?}",
stream.send_flow.available(),
stream.requested_send_capacity,
@@ -500,14 +501,14 @@ impl Prioritize {
// The max frame length
let max_frame_len = dst.max_send_frame_size();
trace!("poll_complete");
log::trace!("poll_complete");
loop {
self.schedule_pending_open(store, counts);
match self.pop_frame(buffer, store, max_frame_len, counts) {
Some(frame) => {
trace!("writing frame={:?}", frame);
log::trace!("writing frame={:?}", frame);
debug_assert_eq!(self.in_flight_data_frame, InFlightData::Nothing);
if let Frame::Data(ref frame) = frame {
@@ -553,11 +554,11 @@ impl Prioritize {
where
B: Buf,
{
trace!("try reclaim frame");
log::trace!("try reclaim frame");
// First check if there are any data chunks to take back
if let Some(frame) = dst.take_last_data_frame() {
trace!(
log::trace!(
" -> reclaimed; frame={:?}; sz={}",
frame,
frame.payload().inner.get_ref().remaining()
@@ -569,7 +570,7 @@ impl Prioritize {
match mem::replace(&mut self.in_flight_data_frame, InFlightData::Nothing) {
InFlightData::Nothing => panic!("wasn't expecting a frame to reclaim"),
InFlightData::Drop => {
trace!("not reclaiming frame for cancelled stream");
log::trace!("not reclaiming frame for cancelled stream");
return false;
}
InFlightData::DataFrame(k) => {
@@ -617,11 +618,11 @@ impl Prioritize {
}
pub fn clear_queue<B>(&mut self, buffer: &mut Buffer<Frame<B>>, stream: &mut store::Ptr) {
trace!("clear_queue; stream={:?}", stream.id);
log::trace!("clear_queue; stream={:?}", stream.id);
// TODO: make this more efficient?
while let Some(frame) = stream.pending_send.pop_front(buffer) {
trace!("dropping; frame={:?}", frame);
log::trace!("dropping; frame={:?}", frame);
}
stream.buffered_send_data = 0;
@@ -658,12 +659,12 @@ impl Prioritize {
where
B: Buf,
{
trace!("pop_frame");
log::trace!("pop_frame");
loop {
match self.pending_send.pop(store) {
Some(mut stream) => {
trace!("pop_frame; stream={:?}; stream.state={:?}",
log::trace!("pop_frame; stream={:?}; stream.state={:?}",
stream.id, stream.state);
// It's possible that this stream, besides having data to send,
@@ -673,7 +674,7 @@ impl Prioritize {
// To be safe, we just always ask the stream.
let is_pending_reset = stream.is_pending_reset_expiration();
trace!(" --> stream={:?}; is_pending_reset={:?};",
log::trace!(" --> stream={:?}; is_pending_reset={:?};",
stream.id, is_pending_reset);
let frame = match stream.pending_send.pop_front(buffer) {
@@ -683,7 +684,7 @@ impl Prioritize {
let stream_capacity = stream.send_flow.available();
let sz = frame.payload().remaining();
trace!(
log::trace!(
" --> data frame; stream={:?}; sz={}; eos={:?}; window={}; \
available={}; requested={}; buffered={};",
frame.stream_id(),
@@ -698,7 +699,7 @@ impl Prioritize {
// Zero length data frames always have capacity to
// be sent.
if sz > 0 && stream_capacity == 0 {
trace!(
log::trace!(
" --> stream capacity is 0; requested={}",
stream.requested_send_capacity
);
@@ -730,10 +731,10 @@ impl Prioritize {
// capacity at this point.
debug_assert!(len <= self.flow.window_size());
trace!(" --> sending data frame; len={}", len);
log::trace!(" --> sending data frame; len={}", len);
// Update the flow control
trace!(" -- updating stream flow --");
log::trace!(" -- updating stream flow --");
stream.send_flow.send_data(len);
// Decrement the stream's buffered data counter
@@ -746,7 +747,7 @@ impl Prioritize {
// line.
self.flow.assign_capacity(len);
trace!(" -- updating connection flow --");
log::trace!(" -- updating connection flow --");
self.flow.send_data(len);
// Wrap the frame's data payload to ensure that the
@@ -784,7 +785,7 @@ impl Prioritize {
// had data buffered to be sent, but all the frames are cleared
// in clear_queue(). Instead of doing O(N) traversal through queue
// to remove, lets just ignore the stream here.
trace!("removing dangling stream from pending_send");
log::trace!("removing dangling stream from pending_send");
// Since this should only happen as a consequence of `clear_queue`,
// we must be in a closed state of some kind.
debug_assert!(stream.state.is_closed());
@@ -794,7 +795,7 @@ impl Prioritize {
}
};
trace!("pop_frame; frame={:?}", frame);
log::trace!("pop_frame; frame={:?}", frame);
if cfg!(debug_assertions) && stream.state.is_idle() {
debug_assert!(stream.id > self.last_opened_id);
@@ -819,11 +820,11 @@ impl Prioritize {
}
fn schedule_pending_open(&mut self, store: &mut Store, counts: &mut Counts) {
trace!("schedule_pending_open");
log::trace!("schedule_pending_open");
// check for any pending open streams
while counts.can_inc_num_send_streams() {
if let Some(mut stream) = self.pending_open.pop(store) {
trace!("schedule_pending_open; stream={:?}", stream.id);
log::trace!("schedule_pending_open; stream={:?}", stream.id);
counts.inc_num_send_streams(&mut stream);
self.pending_send.push(&mut stream);


@@ -1,9 +1,10 @@
use super::*;
use {frame, proto};
use codec::{RecvError, UserError};
use frame::{Reason, DEFAULT_INITIAL_WINDOW_SIZE};
use crate::{frame, proto};
use crate::codec::{RecvError, UserError};
use crate::frame::{Reason, DEFAULT_INITIAL_WINDOW_SIZE};
use http::{HeaderMap, Response, Request, Method};
use futures::try_ready;
use std::io;
use std::time::{Duration, Instant};
@@ -158,7 +159,7 @@ impl Recv {
stream: &mut store::Ptr,
counts: &mut Counts,
) -> Result<(), RecvHeaderBlockError<Option<frame::Headers>>> {
trace!("opening stream; init_window={}", self.init_window_sz);
log::trace!("opening stream; init_window={}", self.init_window_sz);
let is_initial = stream.state.recv_open(frame.is_end_stream())?;
if is_initial {
@@ -203,7 +204,7 @@ impl Recv {
// So, if peer is a server, we'll send a 431. In either case,
// an error is recorded, which will send a REFUSED_STREAM,
// since we don't want any of the data frames either.
debug!(
log::debug!(
"stream error REQUEST_HEADER_FIELDS_TOO_LARGE -- \
recv_headers: frame is over size; stream={:?}",
stream.id
@@ -340,7 +341,7 @@ impl Recv {
capacity: WindowSize,
task: &mut Option<Task>,
) {
trace!(
log::trace!(
"release_connection_capacity; size={}, connection in_flight_data={}",
capacity,
self.in_flight_data,
@@ -366,7 +367,7 @@ impl Recv {
stream: &mut store::Ptr,
task: &mut Option<Task>,
) -> Result<(), UserError> {
trace!("release_capacity; size={}", capacity);
log::trace!("release_capacity; size={}", capacity);
if capacity > stream.in_flight_recv_data {
return Err(UserError::ReleaseCapacityTooBig);
@@ -405,7 +406,7 @@ impl Recv {
return;
}
trace!(
log::trace!(
"auto-release closed stream ({:?}) capacity: {:?}",
stream.id,
stream.in_flight_recv_data,
@@ -433,7 +434,7 @@ impl Recv {
/// The `task` is an optional parked task for the `Connection` that might
/// be blocked on needing more window capacity.
pub fn set_target_connection_window(&mut self, target: WindowSize, task: &mut Option<Task>) {
trace!(
log::trace!(
"set_target_connection_window; target={}; available={}, reserved={}",
target,
self.flow.available(),
@@ -509,7 +510,7 @@ impl Recv {
return Err(RecvError::Connection(Reason::PROTOCOL_ERROR));
}
trace!(
log::trace!(
"recv_data; size={}; connection={}; stream={}",
sz,
self.flow.window_size(),
@@ -518,7 +519,7 @@ impl Recv {
if is_ignoring_frame {
trace!(
log::trace!(
"recv_data; frame ignored on locally reset {:?} for some time",
stream.id,
);
@@ -608,7 +609,7 @@ impl Recv {
pub fn consume_connection_window(&mut self, sz: WindowSize) -> Result<(), RecvError> {
if self.flow.window_size() < sz {
debug!(
log::debug!(
"connection error FLOW_CONTROL_ERROR -- window_size ({:?}) < sz ({:?});",
self.flow.window_size(),
sz,
@@ -642,7 +643,7 @@ impl Recv {
// So, if peer is a server, we'll send a 431. In either case,
// an error is recorded, which will send a REFUSED_STREAM,
// since we don't want any of the data frames either.
debug!(
log::debug!(
"stream error REFUSED_STREAM -- recv_push_promise: \
headers frame is over size; promised_id={:?};",
frame.promised_id(),
@@ -656,7 +657,7 @@ impl Recv {
let promised_id = frame.promised_id();
use http::header;
let (pseudo, fields) = frame.into_parts();
let req = ::server::Peer::convert_poll_message(pseudo, fields, promised_id)?;
let req = crate::server::Peer::convert_poll_message(pseudo, fields, promised_id)?;
// The spec has some requirements for promised request headers
// [https://httpwg.org/specs/rfc7540.html#PushRequests]
@@ -708,7 +709,7 @@ impl Recv {
pub fn ensure_not_idle(&self, id: StreamId) -> Result<(), Reason> {
if let Ok(next) = self.next_stream_id {
if id >= next {
debug!("stream ID implicitly closed, PROTOCOL_ERROR; stream={:?}", id);
log::debug!("stream ID implicitly closed, PROTOCOL_ERROR; stream={:?}", id);
return Err(Reason::PROTOCOL_ERROR);
}
}
@@ -803,7 +804,7 @@ impl Recv {
return;
}
trace!("enqueue_reset_expiration; {:?}", stream.id);
log::trace!("enqueue_reset_expiration; {:?}", stream.id);
if !counts.can_inc_num_reset_streams() {
// try to evict 1 stream if possible
@@ -873,7 +874,7 @@ impl Recv {
fn clear_stream_window_update_queue(&mut self, store: &mut Store, counts: &mut Counts) {
while let Some(stream) = self.pending_window_updates.pop(store) {
counts.transition(stream, |_, stream| {
trace!("clear_stream_window_update_queue; stream={:?}", stream.id);
log::trace!("clear_stream_window_update_queue; stream={:?}", stream.id);
})
}
}
@@ -962,7 +963,7 @@ impl Recv {
};
counts.transition(stream, |_, stream| {
trace!("pending_window_updates -- pop; stream={:?}", stream.id);
log::trace!("pending_window_updates -- pop; stream={:?}", stream.id);
debug_assert!(!stream.is_pending_window_update);
if !stream.state.is_recv_streaming() {


@@ -1,5 +1,5 @@
use codec::{RecvError, UserError};
use frame::{self, Reason};
use crate::codec::{RecvError, UserError};
use crate::frame::{self, Reason};
use super::{
store, Buffer, Codec, Config, Counts, Frame, Prioritize,
Prioritized, Store, Stream, StreamId, StreamIdOverflow, WindowSize,
@@ -62,7 +62,7 @@ impl Send {
counts: &mut Counts,
task: &mut Option<Task>,
) -> Result<(), UserError> {
trace!(
log::trace!(
"send_headers; frame={:?}; init_window={:?}",
frame,
self.init_window_sz
@@ -75,11 +75,11 @@ impl Send {
|| frame.fields().contains_key("keep-alive")
|| frame.fields().contains_key("proxy-connection")
{
debug!("illegal connection-specific headers found");
log::debug!("illegal connection-specific headers found");
return Err(UserError::MalformedHeaders);
} else if let Some(te) = frame.fields().get(http::header::TE) {
if te != "trailers" {
debug!("illegal connection-specific headers found");
log::debug!("illegal connection-specific headers found");
return Err(UserError::MalformedHeaders);
}
@@ -121,7 +121,7 @@ impl Send {
let is_closed = stream.state.is_closed();
let is_empty = stream.pending_send.is_empty();
trace!(
log::trace!(
"send_reset(..., reason={:?}, stream={:?}, ..., \
is_reset={:?}; is_closed={:?}; pending_send.is_empty={:?}; \
state={:?} \
@@ -136,7 +136,7 @@ impl Send {
if is_reset {
// Don't double reset
trace!(
log::trace!(
" -> not sending RST_STREAM ({:?} is already reset)",
stream.id
);
@@ -149,7 +149,7 @@ impl Send {
// If closed AND the send queue is flushed, then the stream cannot be
// reset explicitly, either. Implicit resets can still be queued.
if is_closed && is_empty {
trace!(
log::trace!(
" -> not sending explicit RST_STREAM ({:?} was closed \
and send queue was flushed)",
stream.id
@@ -165,7 +165,7 @@ impl Send {
let frame = frame::Reset::new(stream.id, reason);
trace!("send_reset -- queueing; frame={:?}", frame);
log::trace!("send_reset -- queueing; frame={:?}", frame);
self.prioritize.queue_frame(frame.into(), buffer, stream, task);
self.prioritize.reclaim_all_capacity(stream, counts);
}
@@ -220,7 +220,7 @@ impl Send {
stream.state.send_close();
trace!("send_trailers -- queuing; frame={:?}", frame);
log::trace!("send_trailers -- queuing; frame={:?}", frame);
self.prioritize.queue_frame(frame.into(), buffer, stream, task);
// Release any excess capacity
@@ -286,7 +286,7 @@ impl Send {
&self,
stream: &mut Stream,
mode: PollReset,
) -> Poll<Reason, ::Error> {
) -> Poll<Reason, crate::Error> {
match stream.state.ensure_reason(mode)? {
Some(reason) => Ok(reason.into()),
None => {
@@ -315,7 +315,7 @@ impl Send {
task: &mut Option<Task>,
) -> Result<(), Reason> {
if let Err(e) = self.prioritize.recv_stream_window_update(sz, stream) {
debug!("recv_stream_window_update !!; err={:?}", e);
log::debug!("recv_stream_window_update !!; err={:?}", e);
self.send_reset(
Reason::FLOW_CONTROL_ERROR.into(),
@@ -370,7 +370,7 @@ impl Send {
if val < old_val {
// We must decrease the (remote) window on every open stream.
let dec = old_val - val;
trace!("decrementing all windows; dec={}", dec);
log::trace!("decrementing all windows; dec={}", dec);
let mut total_reclaimed = 0;
store.for_each(|mut stream| {
@@ -396,7 +396,7 @@ impl Send {
0
};
trace!(
log::trace!(
"decremented stream window; id={:?}; decr={}; reclaimed={}; flow={:?}",
stream.id,
dec,


@@ -1,9 +1,9 @@
use std::io;
use codec::{RecvError, UserError};
use codec::UserError::*;
use frame::Reason;
use proto::{self, PollReset};
use crate::codec::{RecvError, UserError};
use crate::codec::UserError::*;
use crate::frame::Reason;
use crate::proto::{self, PollReset};
use self::Inner::*;
use self::Peer::*;
@@ -203,12 +203,12 @@ impl State {
local, ..
} => {
// The remote side will continue to receive data.
trace!("recv_close: Open => HalfClosedRemote({:?})", local);
log::trace!("recv_close: Open => HalfClosedRemote({:?})", local);
self.inner = HalfClosedRemote(local);
Ok(())
},
HalfClosedLocal(..) => {
trace!("recv_close: HalfClosedLocal => Closed");
log::trace!("recv_close: HalfClosedLocal => Closed");
self.inner = Closed(Cause::EndStream);
Ok(())
},
@@ -244,7 +244,7 @@ impl State {
// previous state with the received RST_STREAM, so that the queue
// will be cleared by `Prioritize::pop_frame`.
state => {
trace!(
log::trace!(
"recv_reset; reason={:?}; state={:?}; queued={:?}",
reason, state, queued
);
@@ -256,12 +256,12 @@ impl State {
/// We noticed a protocol error.
pub fn recv_err(&mut self, err: &proto::Error) {
use proto::Error::*;
use crate::proto::Error::*;
match self.inner {
Closed(..) => {},
_ => {
trace!("recv_err; err={:?}", err);
log::trace!("recv_err; err={:?}", err);
self.inner = Closed(match *err {
Proto(reason) => Cause::LocallyReset(reason),
Io(..) => Cause::Io,
@@ -274,7 +274,7 @@ impl State {
match self.inner {
Closed(..) => {},
s => {
trace!("recv_eof; state={:?}", s);
log::trace!("recv_eof; state={:?}", s);
self.inner = Closed(Cause::Io);
}
}
@@ -287,11 +287,11 @@ impl State {
remote, ..
} => {
// The remote side will continue to receive data.
trace!("send_close: Open => HalfClosedLocal({:?})", remote);
log::trace!("send_close: Open => HalfClosedLocal({:?})", remote);
self.inner = HalfClosedLocal(remote);
},
HalfClosedRemote(..) => {
trace!("send_close: HalfClosedRemote => Closed");
log::trace!("send_close: HalfClosedRemote => Closed");
self.inner = Closed(Cause::EndStream);
},
state => panic!("send_close: unexpected state {:?}", state),
@@ -418,7 +418,7 @@ impl State {
}
/// Returns a reason if the stream has been reset.
pub(super) fn ensure_reason(&self, mode: PollReset) -> Result<Option<Reason>, ::Error> {
pub(super) fn ensure_reason(&self, mode: PollReset) -> Result<Option<Reason>, crate::Error> {
match self.inner {
Closed(Cause::Proto(reason)) |
Closed(Cause::LocallyReset(reason)) |


@@ -244,10 +244,10 @@ where
///
/// If the stream is already contained by the list, return `false`.
pub fn push(&mut self, stream: &mut store::Ptr) -> bool {
trace!("Queue::push");
log::trace!("Queue::push");
if N::is_queued(stream) {
trace!(" -> already queued");
log::trace!(" -> already queued");
return false;
}
@@ -259,7 +259,7 @@ where
// Queue the stream
match self.indices {
Some(ref mut idxs) => {
trace!(" -> existing entries");
log::trace!(" -> existing entries");
// Update the current tail node to point to `stream`
let key = stream.key();
@@ -269,7 +269,7 @@ where
idxs.tail = stream.key();
},
None => {
trace!(" -> first entry");
log::trace!(" -> first entry");
self.indices = Some(store::Indices {
head: stream.key(),
tail: stream.key(),


@@ -246,12 +246,12 @@ impl Stream {
self.send_capacity_inc = true;
self.send_flow.assign_capacity(capacity);
trace!(" assigned capacity to stream; available={}; buffered={}; id={:?}",
log::trace!(" assigned capacity to stream; available={}; buffered={}; id={:?}",
self.send_flow.available(), self.buffered_send_data, self.id);
// Only notify if the capacity exceeds the amount of buffered data
if self.send_flow.available() > self.buffered_send_data {
trace!(" notifying task");
log::trace!(" notifying task");
self.notify_send();
}
}


@@ -1,13 +1,13 @@
use {client, proto, server};
use codec::{Codec, RecvError, SendError, UserError};
use frame::{self, Frame, Reason};
use proto::{peer, Peer, Open, WindowSize};
use crate::{client, proto, server};
use crate::codec::{Codec, RecvError, SendError, UserError};
use crate::frame::{self, Frame, Reason};
use crate::proto::{peer, Peer, Open, WindowSize};
use super::{Buffer, Config, Counts, Prioritized, Recv, Send, Stream, StreamId};
use super::recv::RecvHeaderBlockError;
use super::store::{self, Entry, Resolve, Store};
use bytes::{Buf, Bytes};
use futures::{task, Async, Poll};
use futures::{task, Async, Poll, try_ready};
use http::{HeaderMap, Request, Response};
use tokio_io::AsyncWrite;
@@ -97,7 +97,7 @@ where
P: Peer,
{
pub fn new(config: Config) -> Self {
let peer = P::dyn();
let peer = P::r#dyn();
Streams {
inner: Arc::new(Mutex::new(Inner {
@@ -134,7 +134,7 @@ where
// The GOAWAY process has begun. All streams with a greater ID than
// specified as part of GOAWAY should be ignored.
if id > me.actions.recv.max_stream_id() {
trace!("id ({:?}) > max_stream_id ({:?}), ignoring HEADERS", id, me.actions.recv.max_stream_id());
log::trace!("id ({:?}) > max_stream_id ({:?}), ignoring HEADERS", id, me.actions.recv.max_stream_id());
return Ok(());
}
@@ -150,7 +150,7 @@ where
// This may be response headers for a stream we've already
// forgotten about...
if me.actions.may_have_forgotten_stream::<P>(id) {
debug!(
log::debug!(
"recv_headers for old stream={:?}, sending STREAM_CLOSED",
id,
);
@@ -182,7 +182,7 @@ where
// Locally reset streams must ignore frames "for some time".
// This is because the remote may have sent trailers before
// receiving the RST_STREAM frame.
trace!("recv_headers; ignoring trailers on {:?}", stream.id);
log::trace!("recv_headers; ignoring trailers on {:?}", stream.id);
return Ok(());
}
@@ -191,7 +191,7 @@ where
let send_buffer = &mut *send_buffer;
me.counts.transition(stream, |counts, stream| {
trace!(
log::trace!(
"recv_headers; stream={:?}; state={:?}",
stream.id,
stream.state
@@ -254,12 +254,12 @@ where
// The GOAWAY process has begun. All streams with a greater ID
// than specified as part of GOAWAY should be ignored.
if id > me.actions.recv.max_stream_id() {
trace!("id ({:?}) > max_stream_id ({:?}), ignoring DATA", id, me.actions.recv.max_stream_id());
log::trace!("id ({:?}) > max_stream_id ({:?}), ignoring DATA", id, me.actions.recv.max_stream_id());
return Ok(());
}
if me.actions.may_have_forgotten_stream::<P>(id) {
debug!(
log::debug!(
"recv_data for old stream={:?}, sending STREAM_CLOSED",
id,
);
@@ -314,7 +314,7 @@ where
// The GOAWAY process has begun. All streams with a greater ID than
// specified as part of GOAWAY should be ignored.
if id > me.actions.recv.max_stream_id() {
trace!("id ({:?}) > max_stream_id ({:?}), ignoring RST_STREAM", id, me.actions.recv.max_stream_id());
log::trace!("id ({:?}) > max_stream_id ({:?}), ignoring RST_STREAM", id, me.actions.recv.max_stream_id());
return Ok(());
}
@@ -470,7 +470,7 @@ where
// The GOAWAY process has begun. All streams with a greater ID
// than specified as part of GOAWAY should be ignored.
if id > me.actions.recv.max_stream_id() {
trace!("id ({:?}) > max_stream_id ({:?}), ignoring PUSH_PROMISE", id, me.actions.recv.max_stream_id());
log::trace!("id ({:?}) > max_stream_id ({:?}), ignoring PUSH_PROMISE", id, me.actions.recv.max_stream_id());
return Ok(());
}
@@ -549,7 +549,7 @@ where
me.refs += 1;
key.map(|key| {
let stream = &mut me.store.resolve(key);
trace!("next_incoming; id={:?}, state={:?}", stream.id, stream.state);
log::trace!("next_incoming; id={:?}, state={:?}", stream.id, stream.state);
StreamRef {
opaque: OpaqueStreamRef::new(self.inner.clone(), stream),
send_buffer: self.send_buffer.clone(),
@@ -740,7 +740,7 @@ impl<B> Streams<B, client::Peer>
where
B: Buf,
{
pub fn poll_pending_open(&mut self, pending: Option<&OpaqueStreamRef>) -> Poll<(), ::Error> {
pub fn poll_pending_open(&mut self, pending: Option<&OpaqueStreamRef>) -> Poll<(), crate::Error> {
let mut me = self.inner.lock().unwrap();
let me = &mut *me;
@@ -749,7 +749,7 @@ where
if let Some(pending) = pending {
let mut stream = me.store.resolve(pending.key);
trace!("poll_pending_open; stream = {:?}", stream.is_pending_open);
log::trace!("poll_pending_open; stream = {:?}", stream.is_pending_open);
if stream.is_pending_open {
stream.wait_send();
return Ok(Async::NotReady);
@@ -779,7 +779,7 @@ where
actions.conn_error = Some(io::Error::from(io::ErrorKind::BrokenPipe).into());
}
trace!("Streams::recv_eof");
log::trace!("Streams::recv_eof");
me.store
.for_each(|stream| {
@@ -979,7 +979,7 @@ impl<B> StreamRef<B> {
}
/// Request to be notified if a `RST_STREAM` is received for this stream.
pub(crate) fn poll_reset(&mut self, mode: proto::PollReset) -> Poll<Reason, ::Error> {
pub(crate) fn poll_reset(&mut self, mode: proto::PollReset) -> Poll<Reason, crate::Error> {
let mut me = self.opaque.inner.lock().unwrap();
let me = &mut *me;
@@ -1165,7 +1165,7 @@ fn drop_stream_ref(inner: &Mutex<Inner>, key: store::Key) {
let mut me = match inner.lock() {
Ok(inner) => inner,
Err(_) => if ::std::thread::panicking() {
trace!("StreamRef::drop; mutex poisoned");
log::trace!("StreamRef::drop; mutex poisoned");
return;
} else {
panic!("StreamRef::drop; mutex poisoned");
@@ -1176,7 +1176,7 @@ fn drop_stream_ref(inner: &Mutex<Inner>, key: store::Key) {
me.refs -= 1;
let mut stream = me.store.resolve(key);
trace!("drop_stream_ref; stream={:?}", stream);
log::trace!("drop_stream_ref; stream={:?}", stream);
// decrement the stream's ref count by 1.
stream.ref_dec();
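`dyn` became a keyword in the 2018 edition, so the `Peer::dyn()` associated function can only keep its name as the raw identifier `r#dyn`, at both the definition and every call site such as `P::r#dyn()` above. A hedged sketch with placeholder types (not h2's real `Peer` or `DynPeer`):

```rust
// Placeholder stand-ins for the peer abstraction touched by this hunk.
enum DynPeer {
    Server,
}

trait Peer {
    // `dyn` is reserved in Rust 2018; the raw identifier keeps the old name.
    fn r#dyn() -> DynPeer;
}

struct Server;

impl Peer for Server {
    fn r#dyn() -> DynPeer {
        DynPeer::Server
    }
}

fn main() {
    // Call sites use the raw identifier as well.
    let _peer = <Server as Peer>::r#dyn();
}
```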

View File

@@ -64,11 +64,6 @@
//! will use the HTTP/2.0 protocol without prior negotiation.
//!
//! ```rust
//! extern crate futures;
//! extern crate h2;
//! extern crate http;
//! extern crate tokio;
//!
//! use futures::{Future, Stream};
//! # use futures::future::ok;
//! use h2::server;
@@ -129,17 +124,17 @@
//! [`SendStream`]: ../struct.SendStream.html
//! [`TcpListener`]: https://docs.rs/tokio-core/0.1/tokio_core/net/struct.TcpListener.html
use {SendStream, RecvStream, ReleaseCapacity, PingPong};
use codec::{Codec, RecvError};
use frame::{self, Pseudo, Reason, Settings, StreamId};
use proto::{self, Config, Prioritized};
use crate::{SendStream, RecvStream, ReleaseCapacity, PingPong};
use crate::codec::{Codec, RecvError};
use crate::frame::{self, Pseudo, Reason, Settings, StreamId};
use crate::proto::{self, Config, Prioritized};
use bytes::{Buf, Bytes, IntoBuf};
use futures::{self, Async, Future, Poll};
use futures::{self, Async, Future, Poll, try_ready};
use http::{HeaderMap, Request, Response};
use std::{convert, fmt, io, mem};
use std::time::Duration;
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_io::{AsyncRead, AsyncWrite, try_nb};
/// In progress HTTP/2.0 connection handshake future.
///
@@ -184,9 +179,6 @@ pub struct Handshake<T, B: IntoBuf = Bytes> {
/// # Examples
///
/// ```
/// # extern crate futures;
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use futures::{Future, Stream};
/// # use tokio_io::*;
/// # use h2::server;
@@ -229,8 +221,6 @@ pub struct Connection<T, B: IntoBuf> {
/// # Examples
///
/// ```
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use tokio_io::*;
/// # use h2::server::*;
/// #
@@ -328,9 +318,6 @@ const PREFACE: [u8; 24] = *b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
/// # Examples
///
/// ```
/// # extern crate futures;
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use tokio_io::*;
/// # use futures::*;
/// # use h2::server;
@@ -420,7 +407,7 @@ where
/// [`poll`]: struct.Connection.html#method.poll
/// [`RecvStream`]: ../struct.RecvStream.html
/// [`SendStream`]: ../struct.SendStream.html
pub fn poll_close(&mut self) -> Poll<(), ::Error> {
pub fn poll_close(&mut self) -> Poll<(), crate::Error> {
self.connection.poll().map_err(Into::into)
}
@@ -479,9 +466,9 @@ where
B::Buf: 'static,
{
type Item = (Request<RecvStream>, SendResponse<B>);
type Error = ::Error;
type Error = crate::Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, ::Error> {
fn poll(&mut self) -> Poll<Option<Self::Item>, crate::Error> {
// Always try to advance the internal state. Getting NotReady is also
// needed to allow this function to return NotReady.
match self.poll_close()? {
@@ -494,7 +481,7 @@ where
}
if let Some(inner) = self.connection.next_incoming() {
trace!("received incoming");
log::trace!("received incoming");
let (head, _) = inner.take_request().into_parts();
let body = RecvStream::new(ReleaseCapacity::new(inner.clone_to_opaque()));
@@ -532,8 +519,6 @@ impl Builder {
/// # Examples
///
/// ```
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use tokio_io::*;
/// # use h2::server::*;
/// #
@@ -573,8 +558,6 @@ impl Builder {
/// # Examples
///
/// ```
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use tokio_io::*;
/// # use h2::server::*;
/// #
@@ -609,8 +592,6 @@ impl Builder {
/// # Examples
///
/// ```
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use tokio_io::*;
/// # use h2::server::*;
/// #
@@ -644,8 +625,6 @@ impl Builder {
/// # Examples
///
/// ```
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use tokio_io::*;
/// # use h2::server::*;
/// #
@@ -685,8 +664,6 @@ impl Builder {
/// # Examples
///
/// ```
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use tokio_io::*;
/// # use h2::server::*;
/// #
@@ -735,8 +712,6 @@ impl Builder {
/// # Examples
///
/// ```
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use tokio_io::*;
/// # use h2::server::*;
/// #
@@ -783,8 +758,6 @@ impl Builder {
/// # Examples
///
/// ```
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use tokio_io::*;
/// # use h2::server::*;
/// #
@@ -831,8 +804,6 @@ impl Builder {
/// # Examples
///
/// ```
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use tokio_io::*;
/// # use h2::server::*;
/// # use std::time::Duration;
@@ -876,8 +847,6 @@ impl Builder {
/// Basic usage:
///
/// ```
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use tokio_io::*;
/// # use h2::server::*;
/// #
@@ -898,8 +867,6 @@ impl Builder {
/// type will be `&'static [u8]`.
///
/// ```
/// # extern crate h2;
/// # extern crate tokio_io;
/// # use tokio_io::*;
/// # use h2::server::*;
/// #
@@ -954,7 +921,7 @@ impl<B: IntoBuf> SendResponse<B> {
&mut self,
response: Response<()>,
end_of_stream: bool,
) -> Result<SendStream<B>, ::Error> {
) -> Result<SendStream<B>, crate::Error> {
self.inner
.send_response(response, end_of_stream)
.map(|_| SendStream::new(self.inner.clone()))
@@ -992,7 +959,7 @@ impl<B: IntoBuf> SendResponse<B> {
///
/// Calling this method after having called `send_response` will return
/// a user error.
pub fn poll_reset(&mut self) -> Poll<Reason, ::Error> {
pub fn poll_reset(&mut self) -> Poll<Reason, crate::Error> {
self.inner.poll_reset(proto::PollReset::AwaitingHeaders)
}
@@ -1001,8 +968,8 @@ impl<B: IntoBuf> SendResponse<B> {
/// # Panics
///
/// If the lock on the stream store has been poisoned.
pub fn stream_id(&self) -> ::StreamId {
::StreamId::from_internal(self.inner.stream_id())
pub fn stream_id(&self) -> crate::StreamId {
crate::StreamId::from_internal(self.inner.stream_id())
}
// TODO: Support reserving push promises.
@@ -1024,7 +991,7 @@ where
B: Buf,
{
type Item = Codec<T, B>;
type Error = ::Error;
type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
// Flush the codec
@@ -1054,7 +1021,7 @@ where
B: Buf,
{
type Item = Codec<T, B>;
type Error = ::Error;
type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let mut buf = [0; 24];
@@ -1090,11 +1057,11 @@ impl<T, B: IntoBuf> Future for Handshake<T, B>
B: IntoBuf,
{
type Item = Connection<T, B>;
type Error = ::Error;
type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
trace!("Handshake::poll(); state={:?};", self.state);
use server::Handshaking::*;
log::trace!("Handshake::poll(); state={:?};", self.state);
use crate::server::Handshaking::*;
self.state = if let Flushing(ref mut flush) = self.state {
// We're currently flushing a pending SETTINGS frame. Poll the
@@ -1102,11 +1069,11 @@ impl<T, B: IntoBuf> Future for Handshake<T, B>
// for the client preface.
let codec = match flush.poll()? {
Async::NotReady => {
trace!("Handshake::poll(); flush.poll()=NotReady");
log::trace!("Handshake::poll(); flush.poll()=NotReady");
return Ok(Async::NotReady);
},
Async::Ready(flushed) => {
trace!("Handshake::poll(); flush.poll()=Ready");
log::trace!("Handshake::poll(); flush.poll()=Ready");
flushed
}
};
@@ -1140,7 +1107,7 @@ impl<T, B: IntoBuf> Future for Handshake<T, B>
settings: self.builder.settings.clone(),
});
trace!("Handshake::poll(); connection established!");
log::trace!("Handshake::poll(); connection established!");
let mut c = Connection { connection };
if let Some(sz) = self.builder.initial_target_connection_window_size {
c.set_target_window_size(sz);
@@ -1200,7 +1167,7 @@ impl proto::Peer for Peer {
true
}
fn dyn() -> proto::DynPeer {
fn r#dyn() -> proto::DynPeer {
proto::DynPeer::Server
}
@@ -1213,7 +1180,7 @@ impl proto::Peer for Peer {
macro_rules! malformed {
($($arg:tt)*) => {{
debug!($($arg)*);
log::debug!($($arg)*);
return Err(RecvError::Stream {
id: stream_id,
reason: Reason::PROTOCOL_ERROR,
@@ -1231,7 +1198,7 @@ impl proto::Peer for Peer {
// Specifying :status for a request is a protocol error
if pseudo.status.is_some() {
trace!("malformed headers: :status field on request; PROTOCOL_ERROR");
log::trace!("malformed headers: :status field on request; PROTOCOL_ERROR");
return Err(RecvError::Connection(Reason::PROTOCOL_ERROR));
}
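The import lines in this file also show the other macro-related 2018 change: `try_ready!` and `try_nb!` are now brought in with ordinary `use` items (`use futures::{..., try_ready};`) instead of `#[macro_use]`. A minimal sketch of a `use`-imported `try_ready!`, assuming the futures 0.1 API (`Poll`, `Async`) that this crate still targets:

```rust
use futures::{try_ready, Async, Poll};

// Combine two poll results; NotReady or an error short-circuits via try_ready!.
fn poll_two(a: Poll<u32, ()>, b: Poll<u32, ()>) -> Poll<u32, ()> {
    let a = try_ready!(a);
    let b = try_ready!(b);
    Ok(Async::Ready(a + b))
}

fn main() {
    let sum = poll_two(Ok(Async::Ready(1)), Ok(Async::Ready(2)));
    assert_eq!(sum, Ok(Async::Ready(3)));
}
```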

View File

@@ -1,9 +1,9 @@
use codec::UserError;
use frame::Reason;
use proto::{self, WindowSize};
use crate::codec::UserError;
use crate::frame::Reason;
use crate::proto::{self, WindowSize};
use bytes::{Bytes, IntoBuf};
use futures::{self, Poll, Async};
use futures::{self, Poll, Async, try_ready};
use http::{HeaderMap};
use std::fmt;
@@ -309,7 +309,7 @@ impl<B: IntoBuf> SendStream<B> {
/// amount of assigned capacity at that point in time. It is also possible
/// that `n` is lower than the previous call if, since then, the caller has
/// sent data.
pub fn poll_capacity(&mut self) -> Poll<Option<usize>, ::Error> {
pub fn poll_capacity(&mut self) -> Poll<Option<usize>, crate::Error> {
let res = try_ready!(self.inner.poll_capacity());
Ok(Async::Ready(res.map(|v| v as usize)))
}
@@ -329,7 +329,7 @@ impl<B: IntoBuf> SendStream<B> {
/// amounts of data being buffered in memory.
///
/// [`Error`]: struct.Error.html
pub fn send_data(&mut self, data: B, end_of_stream: bool) -> Result<(), ::Error> {
pub fn send_data(&mut self, data: B, end_of_stream: bool) -> Result<(), crate::Error> {
self.inner
.send_data(data.into_buf(), end_of_stream)
.map_err(Into::into)
@@ -339,7 +339,7 @@ impl<B: IntoBuf> SendStream<B> {
///
/// Sending trailers implicitly closes the send stream. Once the send stream
/// is closed, no more data can be sent.
pub fn send_trailers(&mut self, trailers: HeaderMap) -> Result<(), ::Error> {
pub fn send_trailers(&mut self, trailers: HeaderMap) -> Result<(), crate::Error> {
self.inner.send_trailers(trailers).map_err(Into::into)
}
@@ -366,7 +366,7 @@ impl<B: IntoBuf> SendStream<B> {
///
/// If the connection sees an error, this returns that error instead of a
/// `Reason`.
pub fn poll_reset(&mut self) -> Poll<Reason, ::Error> {
pub fn poll_reset(&mut self) -> Poll<Reason, crate::Error> {
self.inner.poll_reset(proto::PollReset::Streaming)
}
@@ -383,7 +383,7 @@ impl<B: IntoBuf> SendStream<B> {
// ===== impl StreamId =====
impl StreamId {
pub(crate) fn from_internal(id: ::frame::StreamId) -> Self {
pub(crate) fn from_internal(id: crate::frame::StreamId) -> Self {
StreamId(id.into())
}
}
@@ -417,7 +417,7 @@ impl RecvStream {
}
/// Returns received trailers.
pub fn poll_trailers(&mut self) -> Poll<Option<HeaderMap>, ::Error> {
pub fn poll_trailers(&mut self) -> Poll<Option<HeaderMap>, crate::Error> {
self.inner.inner.poll_trailers().map_err(Into::into)
}
@@ -433,7 +433,7 @@ impl RecvStream {
impl futures::Stream for RecvStream {
type Item = Bytes;
type Error = ::Error;
type Error = crate::Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
self.inner.inner.poll_data().map_err(Into::into)
@@ -493,7 +493,7 @@ impl ReleaseCapacity {
///
/// [struct level]: #
/// [`set_target_window_size`]: server/struct.Server.html#method.set_target_window_size
pub fn release_capacity(&mut self, sz: usize) -> Result<(), ::Error> {
pub fn release_capacity(&mut self, sz: usize) -> Result<(), crate::Error> {
if sz > proto::MAX_WINDOW_SIZE as usize {
return Err(UserError::ReleaseCapacityTooBig.into());
}
@@ -534,7 +534,7 @@ impl PingPong {
/// .unwrap();
/// # }
/// ```
pub fn send_ping(&mut self, ping: Ping) -> Result<(), ::Error> {
pub fn send_ping(&mut self, ping: Ping) -> Result<(), crate::Error> {
// Passing a `Ping` here is just to be forwards-compatible with
// eventually allowing the caller to choose a ping payload. For now, we
// can just drop it.
@@ -553,8 +553,6 @@ impl PingPong {
/// # Example
///
/// ```
/// # extern crate futures;
/// # extern crate h2;
/// # use futures::Future;
/// # fn doc(mut ping_pong: h2::PingPong) {
/// // let mut ping_pong = ...
@@ -573,7 +571,7 @@ impl PingPong {
/// ```
///
/// [sent]: struct.PingPong.html#method.send_ping
pub fn poll_pong(&mut self) -> Poll<Pong, ::Error> {
pub fn poll_pong(&mut self) -> Poll<Pong, crate::Error> {
try_ready!(self.inner.poll_pong());
Ok(Async::Ready(Pong {
_p: (),

View File

@@ -3,6 +3,7 @@ name = "h2-fuzz"
version = "0.0.0"
publish = false
license = "MIT"
edition = "2018"
[dependencies]
h2 = { path = "../.." }

View File

@@ -1,11 +1,3 @@
extern crate futures;
extern crate tokio_io;
#[macro_use]
extern crate honggfuzz;
extern crate env_logger;
extern crate h2;
extern crate http;
use futures::prelude::*;
use futures::{executor, future, task};
use http::{Method, Request};
@@ -155,7 +147,7 @@ fn run(script: &[u8]) -> Result<(), h2::Error> {
fn main() {
env_logger::init();
loop {
fuzz!(|data: &[u8]| {
honggfuzz::fuzz!(|data: &[u8]| {
eprintln!("{:?}", run(data));
});
}
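Same idea in the fuzz target: with `#[macro_use] extern crate honggfuzz;` gone, the macro is called as `honggfuzz::fuzz!`. A stripped-down sketch of the shape of the target (the closure body is a placeholder; the real target feeds `data` into the h2 harness and is run under honggfuzz's cargo wrapper):

```rust
fn main() {
    loop {
        // Path-qualified macro invocation replaces #[macro_use].
        honggfuzz::fuzz!(|data: &[u8]| {
            // Placeholder: hand the raw input to whatever harness the target defines.
            let _ = data.len();
        });
    }
}
```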

View File

@@ -2,6 +2,7 @@
name = "h2-support"
version = "0.1.0"
authors = ["Carl Lerche <me@carllerche.com>"]
edition = "2018"
[dependencies]
h2 = { path = "../..", features = ["unstable"] }

View File

@@ -10,7 +10,7 @@ macro_rules! assert_closed {
macro_rules! assert_headers {
($frame:expr) => {{
match $frame {
::h2::frame::Frame::Headers(v) => v,
h2::frame::Frame::Headers(v) => v,
f => panic!("expected HEADERS; actual={:?}", f),
}
}}
@@ -20,7 +20,7 @@ macro_rules! assert_headers {
macro_rules! assert_data {
($frame:expr) => {{
match $frame {
::h2::frame::Frame::Data(v) => v,
h2::frame::Frame::Data(v) => v,
f => panic!("expected DATA; actual={:?}", f),
}
}}
@@ -30,7 +30,7 @@ macro_rules! assert_data {
macro_rules! assert_ping {
($frame:expr) => {{
match $frame {
::h2::frame::Frame::Ping(v) => v,
h2::frame::Frame::Ping(v) => v,
f => panic!("expected PING; actual={:?}", f),
}
}}
@@ -40,7 +40,7 @@ macro_rules! assert_ping {
macro_rules! assert_settings {
($frame:expr) => {{
match $frame {
::h2::frame::Frame::Settings(v) => v,
h2::frame::Frame::Settings(v) => v,
f => panic!("expected SETTINGS; actual={:?}", f),
}
}}

View File

@@ -1,17 +1,7 @@
//! Utilities to support tests.
pub extern crate bytes;
pub extern crate env_logger;
#[macro_use]
pub extern crate futures;
pub extern crate h2;
pub extern crate http;
pub extern crate string;
#[macro_use]
pub extern crate tokio_io;
#[macro_use]
mod assert;
pub mod assert;
pub mod raw;
@@ -25,8 +15,8 @@ pub mod util;
mod client_ext;
mod future_ext;
pub use client_ext::{SendRequestExt};
pub use future_ext::{FutureExt, Unwrap};
pub use crate::client_ext::{SendRequestExt};
pub use crate::future_ext::{FutureExt, Unwrap};
pub type WindowSize = usize;
pub const DEFAULT_WINDOW_SIZE: WindowSize = (1 << 16) - 1;

View File

@@ -1,4 +1,4 @@
use {frames, FutureExt, SendFrame};
use crate::{frames, FutureExt, SendFrame};
use h2::{self, RecvError, SendError};
use h2::frame::{self, Frame};
@@ -22,7 +22,7 @@ pub struct Mock {
#[derive(Debug)]
pub struct Handle {
codec: ::Codec<Pipe>,
codec: crate::Codec<Pipe>,
}
#[derive(Debug)]
@@ -92,7 +92,7 @@ pub fn new_with_write_capacity(cap: usize) -> (Mock, Handle) {
impl Handle {
/// Get a mutable reference to inner Codec.
pub fn codec_mut(&mut self) -> &mut ::Codec<Pipe> {
pub fn codec_mut(&mut self) -> &mut crate::Codec<Pipe> {
&mut self.codec
}
@@ -276,7 +276,7 @@ impl io::Write for Handle {
impl AsyncWrite for Handle {
fn shutdown(&mut self) -> Poll<(), io::Error> {
use std::io::Write;
try_nb!(self.flush());
tokio_io::try_nb!(self.flush());
Ok(().into())
}
}
@@ -359,7 +359,7 @@ impl io::Write for Mock {
impl AsyncWrite for Mock {
fn shutdown(&mut self) -> Poll<(), io::Error> {
use std::io::Write;
try_nb!(self.flush());
tokio_io::try_nb!(self.flush());
Ok(().into())
}
}
@@ -426,7 +426,7 @@ impl io::Write for Pipe {
impl AsyncWrite for Pipe {
fn shutdown(&mut self) -> Poll<(), io::Error> {
use std::io::Write;
try_nb!(self.flush());
tokio_io::try_nb!(self.flush());
Ok(().into())
}
}
@@ -519,7 +519,7 @@ pub trait HandleFutureExt {
.write_buf(&mut buf)
.map_err(|e| panic!("write err={:?}", e));
try_ready!(res);
futures::try_ready!(res);
}
Ok(handle.take().unwrap().into())

View File

@@ -25,8 +25,6 @@
//! Then use it in your project. For example, a test could be written:
//!
//! ```
//! extern crate mock_io;
//!
//! use mock_io::{Builder, Mock};
//! use std::io::{Read, Write};
//!
@@ -88,7 +86,7 @@ use std::time::{Duration, Instant};
pub struct Mock {
inner: Inner,
tokio: tokio::Inner,
async: Option<bool>,
r#async: Option<bool>,
}
#[derive(Debug)]
@@ -103,7 +101,7 @@ pub struct Builder {
actions: VecDeque<Action>,
// true for Tokio, false for blocking, None to auto detect
async: Option<bool>,
r#async: Option<bool>,
}
#[derive(Debug, Clone)]
@@ -171,7 +169,7 @@ impl Builder {
waiting: None,
},
tokio: tokio,
async: src.async,
r#async: src.r#async,
};
let handle = Handle { inner: handle };
@@ -231,7 +229,7 @@ impl Mock {
/// Returns `true` if running in a futures-rs task context
fn is_async(&self) -> bool {
self.async.unwrap_or(tokio::is_task_ctx())
self.r#async.unwrap_or(tokio::is_task_ctx())
}
}
@@ -376,23 +374,19 @@ impl io::Write for Mock {
// use tokio::*;
mod tokio {
extern crate futures;
extern crate tokio_io;
extern crate tokio_timer;
use super::*;
use self::futures::{Future, Stream, Poll, Async};
use self::futures::sync::mpsc;
use self::futures::task::{self, Task};
use self::tokio_io::{AsyncRead, AsyncWrite};
use self::tokio_timer::{Timer, Sleep};
use futures::{Future, Stream, Poll, Async};
use futures::sync::mpsc;
use futures::task::{self, Task};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_timer::{Timer, Sleep};
use std::io;
impl Builder {
pub fn set_async(&mut self, is_async: bool) -> &mut Self {
self.async = Some(is_async);
self.r#async = Some(is_async);
self
}
}
@@ -467,7 +461,7 @@ mod tokio {
pub fn async_read(me: &mut Mock, dst: &mut [u8]) -> io::Result<usize> {
loop {
if let Some(ref mut sleep) = me.tokio.sleep {
let res = try!(sleep.poll());
let res = r#try!(sleep.poll());
if !res.is_ready() {
return Err(io::ErrorKind::WouldBlock.into());
@@ -509,7 +503,7 @@ mod tokio {
pub fn async_write(me: &mut Mock, src: &[u8]) -> io::Result<usize> {
loop {
if let Some(ref mut sleep) = me.tokio.sleep {
let res = try!(sleep.poll());
let res = r#try!(sleep.poll());
if !res.is_ready() {
return Err(io::ErrorKind::WouldBlock.into());
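`async` and `try` are reserved words in the 2018 edition, so this mock keeps the field name as `r#async` and invokes the old `try!` macro as `r#try!` (rewriting with `?` would be the more idiomatic fix; the raw identifier is the minimal one). A sketch of both raw-identifier spellings with placeholder types:

```rust
// Placeholder mirroring the mock's `async` field, renamed via raw identifier.
struct Mock {
    r#async: Option<bool>,
}

impl Mock {
    fn is_async(&self) -> bool {
        self.r#async.unwrap_or(false)
    }
}

// `try` is reserved in 2018, so the 2015 `try!` macro is spelled `r#try!`.
fn read_len(src: &[u8]) -> std::io::Result<usize> {
    let first = r#try!(src
        .first()
        .ok_or_else(|| std::io::Error::from(std::io::ErrorKind::WouldBlock)));
    Ok(*first as usize)
}

fn main() {
    let mock = Mock { r#async: Some(true) };
    assert!(mock.is_async());
    assert!(read_len(&[3]).is_ok());
}
```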

View File

@@ -1,11 +1,11 @@
// Re-export H2 crate
pub use super::h2;
pub use h2;
pub use self::h2::*;
pub use self::h2::client;
pub use self::h2::frame::StreamId;
pub use self::h2::server;
pub use h2::*;
pub use h2::client;
pub use h2::frame::StreamId;
pub use h2::server;
// Re-export mock
pub use super::mock::{self, HandleFutureExt};
@@ -22,11 +22,16 @@ pub use super::util;
// Re-export some type defines
pub use super::{Codec, SendFrame};
// Re-export macros
pub use super::{assert_ping, assert_data, assert_headers, assert_closed,
raw_codec, poll_frame, poll_err};
// Re-export useful crates
pub use super::{bytes, env_logger, futures, http, mock_io, tokio_io};
pub use {bytes, env_logger, futures, http, tokio_io};
pub use super::mock_io;
// Re-export primary future types
pub use self::futures::{Future, IntoFuture, Sink, Stream};
pub use futures::{Future, IntoFuture, Sink, Stream};
// And our Future extensions
pub use super::future_ext::{FutureExt, Unwrap};
@@ -35,9 +40,9 @@ pub use super::future_ext::{FutureExt, Unwrap};
pub use super::client_ext::{SendRequestExt};
// Re-export HTTP types
pub use self::http::{uri, HeaderMap, Method, Request, Response, StatusCode, Version};
pub use http::{uri, HeaderMap, Method, Request, Response, StatusCode, Version};
pub use self::bytes::{Buf, BufMut, Bytes, BytesMut, IntoBuf};
pub use bytes::{Buf, BufMut, Bytes, BytesMut, IntoBuf};
pub use tokio_io::{AsyncRead, AsyncWrite};
@@ -53,7 +58,7 @@ pub trait MockH2 {
fn handshake(&mut self) -> &mut Self;
}
impl MockH2 for mock_io::Builder {
impl MockH2 for super::mock_io::Builder {
fn handshake(&mut self) -> &mut Self {
self.write(b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n")
// Settings frame
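The new `pub use super::{assert_ping, assert_data, ...}` lines rely on another 2018 feature: `macro_rules!` macros can now be re-exported with `pub use`, so the test prelude can expose them like ordinary items. A hedged sketch with a made-up macro name (not one of h2-support's):

```rust
mod asserts {
    // #[macro_export] places the macro at the crate root path.
    #[macro_export]
    macro_rules! assert_even {
        ($n:expr) => {
            assert!($n % 2 == 0, "expected an even number, got {}", $n)
        };
    }
}

pub mod prelude {
    // Rust 2018 allows re-exporting the macro with a plain `pub use`.
    pub use crate::assert_even;
}

fn main() {
    use crate::prelude::assert_even;
    assert_even!(4);
}
```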

View File

@@ -1,6 +1,6 @@
use h2;
use super::string::{String, TryFrom};
use string::{String, TryFrom};
use bytes::Bytes;
use futures::{Async, Future, Poll};
@@ -44,7 +44,7 @@ impl Future for WaitForCapacity {
type Error = ();
fn poll(&mut self) -> Poll<Self::Item, ()> {
let _ = try_ready!(self.stream().poll_capacity().map_err(|_| panic!()));
let _ = futures::try_ready!(self.stream().poll_capacity().map_err(|_| panic!()));
let act = self.stream().capacity();

View File

@@ -3,6 +3,7 @@ name = "h2-tests"
version = "0.1.0"
authors = ["Carl Lerche <me@carllerche.com>"]
publish = false
edition = "2018"
[dependencies]

View File

@@ -1,12 +1,8 @@
#[macro_use]
extern crate log;
extern crate h2_support;
use h2_support::prelude::*;
#[test]
fn handshake() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let mock = mock_io::Builder::new()
.handshake()
@@ -15,7 +11,7 @@ fn handshake() {
let (_client, h2) = client::handshake(mock).wait().unwrap();
trace!("hands have been shook");
log::trace!("hands have been shook");
// At this point, the connection should be closed
h2.wait().unwrap();
@@ -23,7 +19,7 @@ fn handshake() {
#[test]
fn client_other_thread() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let srv = srv.assert_client_handshake()
@@ -60,7 +56,7 @@ fn client_other_thread() {
#[test]
fn recv_invalid_server_stream_id() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let mock = mock_io::Builder::new()
.handshake()
@@ -84,7 +80,7 @@ fn recv_invalid_server_stream_id() {
.body(())
.unwrap();
info!("sending request");
log::info!("sending request");
let (response, _) = client.send_request(request, true).unwrap();
// The connection errors
@@ -96,7 +92,7 @@ fn recv_invalid_server_stream_id() {
#[test]
fn request_stream_id_overflows() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
@@ -159,7 +155,7 @@ fn request_stream_id_overflows() {
#[test]
fn client_builder_max_concurrent_streams() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let mut settings = frame::Settings::default();
@@ -199,7 +195,7 @@ fn client_builder_max_concurrent_streams() {
#[test]
fn request_over_max_concurrent_streams_errors() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
@@ -284,7 +280,7 @@ fn request_over_max_concurrent_streams_errors() {
#[test]
fn send_request_poll_ready_when_connection_error() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
@@ -362,7 +358,7 @@ fn send_request_poll_ready_when_connection_error() {
#[test]
fn send_reset_notifies_recv_stream() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
@@ -426,7 +422,7 @@ fn send_reset_notifies_recv_stream() {
#[test]
fn http_11_request_without_scheme_or_authority() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let srv = srv.assert_client_handshake()
@@ -461,7 +457,7 @@ fn http_11_request_without_scheme_or_authority() {
#[test]
fn http_2_request_without_scheme_or_authority() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let srv = srv.assert_client_handshake()
@@ -501,7 +497,7 @@ fn request_with_h1_version() {}
#[test]
fn request_with_connection_headers() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
// can't assert full handshake, since client never sends a request, and
@@ -544,7 +540,7 @@ fn request_with_connection_headers() {
#[test]
fn connection_close_notifies_response_future() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let srv = srv.assert_client_handshake()
@@ -587,7 +583,7 @@ fn connection_close_notifies_response_future() {
#[test]
fn connection_close_notifies_client_poll_ready() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let srv = srv.assert_client_handshake()
@@ -637,7 +633,7 @@ fn connection_close_notifies_client_poll_ready() {
#[test]
fn sending_request_on_closed_connection() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let srv = srv.assert_client_handshake()
@@ -702,7 +698,7 @@ fn sending_request_on_closed_connection() {
#[test]
fn recv_too_big_headers() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let srv = srv.assert_client_handshake()
@@ -779,7 +775,7 @@ fn recv_too_big_headers() {
#[test]
fn pending_send_request_gets_reset_by_peer_properly() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let payload = [0; (frame::DEFAULT_INITIAL_WINDOW_SIZE * 2) as usize];
@@ -842,7 +838,7 @@ fn pending_send_request_gets_reset_by_peer_properly() {
#[test]
fn request_without_path() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let srv = srv.assert_client_handshake()
@@ -870,7 +866,7 @@ fn request_without_path() {
#[test]
fn request_options_with_star() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
// Note the lack of trailing slash.
@@ -913,7 +909,7 @@ fn notify_on_send_capacity() {
// stream, the client is notified.
use std::sync::mpsc;
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let (done_tx, done_rx) = futures::sync::oneshot::channel();
@@ -999,7 +995,7 @@ fn notify_on_send_capacity() {
#[test]
fn send_stream_poll_reset() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let srv = srv
@@ -1039,7 +1035,7 @@ fn send_stream_poll_reset() {
fn drop_pending_open() {
// This test checks that a stream queued for pending open behaves correctly when its
// client drops.
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let (init_tx, init_rx) = futures::sync::oneshot::channel();
@@ -1130,7 +1126,7 @@ fn malformed_response_headers_dont_unlink_stream() {
// This test checks that receiving malformed headers frame on a stream with
// no remaining references correctly resets the stream, without prematurely
// unlinking it.
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let (drop_tx, drop_rx) = futures::sync::oneshot::channel();

View File

@@ -1,6 +1,3 @@
#[macro_use]
extern crate h2_support;
use h2_support::prelude::*;
use std::error::Error;
@@ -135,7 +132,7 @@ fn read_headers_empty_payload() {}
#[test]
fn read_continuation_frames() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let large = build_large_headers();
@@ -191,7 +188,7 @@ fn read_continuation_frames() {
#[test]
fn update_max_frame_len_at_rest() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
// TODO: add test for updating max frame length in flight as well?
let mut codec = raw_codec! {
read => [

View File

@@ -1,12 +1,10 @@
extern crate h2_support;
use h2_support::prelude::*;
#[test]
fn write_continuation_frames() {
// An invalid dependency ID results in a stream level error. The hpack
// payload should still be decoded.
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let large = build_large_headers();

View File

@@ -1,12 +1,10 @@
extern crate h2_support;
use h2_support::prelude::*;
// In this case, the stream & connection both have capacity, but capacity is not
// explicitly requested.
#[test]
fn send_data_without_requesting_capacity() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let payload = [0; 1024];
@@ -52,7 +50,7 @@ fn send_data_without_requesting_capacity() {
#[test]
fn release_capacity_sends_window_update() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let payload = vec![0u8; 16_384];
@@ -126,7 +124,7 @@ fn release_capacity_sends_window_update() {
#[test]
fn release_capacity_of_small_amount_does_not_send_window_update() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let payload = [0; 16];
@@ -190,7 +188,7 @@ fn expand_window_calls_are_coalesced() {}
#[test]
fn recv_data_overflows_connection_window() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
@@ -257,7 +255,7 @@ fn recv_data_overflows_connection_window() {
#[test]
fn recv_data_overflows_stream_window() {
// this tests for when streams have smaller windows than their connection
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
@@ -324,7 +322,7 @@ fn recv_window_update_causes_overflow() {
#[test]
fn stream_error_release_connection_capacity() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let srv = srv.assert_client_handshake()
@@ -395,7 +393,7 @@ fn stream_error_release_connection_capacity() {
#[test]
fn stream_close_by_data_frame_releases_capacity() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let window_size = frame::DEFAULT_INITIAL_WINDOW_SIZE as usize;
@@ -466,7 +464,7 @@ fn stream_close_by_data_frame_releases_capacity() {
#[test]
fn stream_close_by_trailers_frame_releases_capacity() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let window_size = frame::DEFAULT_INITIAL_WINDOW_SIZE as usize;
@@ -543,7 +541,7 @@ fn stream_close_by_trailers_frame_releases_capacity() {
#[test]
fn stream_close_by_send_reset_frame_releases_capacity() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let srv = srv.assert_client_handshake()
@@ -595,7 +593,7 @@ fn stream_close_by_recv_reset_frame_releases_capacity() {}
#[test]
fn recv_window_update_on_stream_closed_by_data_frame() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let h2 = client::handshake(io)
@@ -644,7 +642,7 @@ fn recv_window_update_on_stream_closed_by_data_frame() {
#[test]
fn reserved_capacity_assigned_in_multi_window_updates() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let h2 = client::handshake(io)
@@ -725,11 +723,11 @@ fn reserved_capacity_assigned_in_multi_window_updates() {
#[test]
fn connection_notified_on_released_capacity() {
use futures::sync::oneshot;
use crate::futures::sync::oneshot;
use std::sync::mpsc;
use std::thread;
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
// We're going to run the connection on a thread in order to isolate task
@@ -832,7 +830,7 @@ fn connection_notified_on_released_capacity() {
#[test]
fn recv_settings_removes_available_capacity() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let mut settings = frame::Settings::default();
@@ -890,7 +888,7 @@ fn recv_settings_removes_available_capacity() {
#[test]
fn recv_settings_keeps_assigned_capacity() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let (sent_settings, sent_settings_rx) = futures::sync::oneshot::channel();
@@ -947,7 +945,7 @@ fn recv_settings_keeps_assigned_capacity() {
#[test]
fn recv_no_init_window_then_receive_some_init_window() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let mut settings = frame::Settings::default();
@@ -1012,7 +1010,7 @@ fn settings_lowered_capacity_returns_capacity_to_connection() {
use std::sync::mpsc;
use std::thread;
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let (tx1, rx1) = mpsc::channel();
let (tx2, rx2) = mpsc::channel();
@@ -1130,7 +1128,7 @@ fn settings_lowered_capacity_returns_capacity_to_connection() {
#[test]
fn client_increase_target_window_size() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let srv = srv.assert_client_handshake()
@@ -1152,7 +1150,7 @@ fn client_increase_target_window_size() {
#[test]
fn increase_target_window_size_after_using_some() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let srv = srv.assert_client_handshake()
@@ -1204,7 +1202,7 @@ fn increase_target_window_size_after_using_some() {
#[test]
fn decrease_target_window_size() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let srv = srv.assert_client_handshake()
@@ -1252,7 +1250,7 @@ fn decrease_target_window_size() {
#[test]
fn server_target_window_size() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, client) = mock::new();
let client = client.assert_server_handshake()
@@ -1273,7 +1271,7 @@ fn server_target_window_size() {
#[test]
fn recv_settings_increase_window_size_after_using_some() {
// See https://github.com/hyperium/h2/issues/208
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let new_win_size = 16_384 * 4; // 1 bigger than default
@@ -1318,7 +1316,7 @@ fn recv_settings_increase_window_size_after_using_some() {
#[test]
fn reserve_capacity_after_peer_closes() {
// See https://github.com/hyperium/h2/issues/300
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let srv = srv.assert_client_handshake()
@@ -1357,7 +1355,7 @@ fn reserve_capacity_after_peer_closes() {
fn reset_stream_waiting_for_capacity() {
// This tests that receiving a reset on a stream that has some available
// connection-level window reassigns that window to another stream.
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
@@ -1419,7 +1417,7 @@ fn reset_stream_waiting_for_capacity() {
#[test]
fn data_padding() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let mut body = Vec::new();

View File

@@ -1,9 +1,5 @@
extern crate tokio;
#[macro_use]
extern crate h2_support;
use h2_support::prelude::*;
use h2_support::futures::{Async, Poll};
use futures::{Async, Poll};
use tokio::net::{TcpListener, TcpStream};
use std::{net::SocketAddr, thread, sync::{atomic::{AtomicUsize, Ordering}, Arc}};

View File

@@ -1,11 +1,9 @@
#[macro_use]
extern crate h2_support;
use h2_support::prelude::*;
use h2_support::assert_ping;
#[test]
fn recv_single_ping() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (m, mock) = mock::new();
// Create the handshake
@@ -41,7 +39,7 @@ fn recv_single_ping() {
#[test]
fn recv_multiple_pings() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, client) = mock::new();
let client = client.assert_server_handshake()
@@ -65,7 +63,7 @@ fn recv_multiple_pings() {
#[test]
fn pong_has_highest_priority() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, client) = mock::new();
let data = Bytes::from(vec![0; 16_384]);
@@ -111,7 +109,7 @@ fn pong_has_highest_priority() {
#[test]
fn user_ping_pong() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let srv = srv.assert_client_handshake()
@@ -160,7 +158,7 @@ fn user_ping_pong() {
#[test]
fn user_notifies_when_connection_closes() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let srv = srv.assert_client_handshake()

View File

@@ -1,12 +1,9 @@
#[macro_use]
extern crate h2_support;
use h2_support::{DEFAULT_WINDOW_SIZE};
use h2_support::prelude::*;
#[test]
fn single_stream_send_large_body() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let payload = [0; 1024];
@@ -69,7 +66,7 @@ fn single_stream_send_large_body() {
#[test]
fn multiple_streams_with_payload_greater_than_default_window() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let payload = vec![0; 16384*5-1];
@@ -132,7 +129,7 @@ fn multiple_streams_with_payload_greater_than_default_window() {
#[test]
fn single_stream_send_extra_large_body_multi_frames_one_buffer() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let payload = vec![0; 32_768];
@@ -199,7 +196,7 @@ fn single_stream_send_extra_large_body_multi_frames_one_buffer() {
#[test]
fn single_stream_send_body_greater_than_default_window() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let payload = vec![0; 16384*5-1];
@@ -292,7 +289,7 @@ fn single_stream_send_body_greater_than_default_window() {
#[test]
fn single_stream_send_extra_large_body_multi_frames_multi_buffer() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let payload = vec![0; 32_768];
@@ -358,7 +355,7 @@ fn single_stream_send_extra_large_body_multi_frames_multi_buffer() {
#[test]
fn send_data_receive_window_update() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (m, mock) = mock::new();
let h2 = client::handshake(m)

View File

@@ -1,10 +1,8 @@
extern crate h2_support;
use h2_support::prelude::*;
#[test]
fn recv_push_works() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let mock = srv.assert_client_handshake()
@@ -56,7 +54,7 @@ fn recv_push_works() {
#[test]
fn pushed_streams_arent_dropped_too_early() {
// tests that by default, received push promises work
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let mock = srv.assert_client_handshake()
@@ -110,7 +108,7 @@ fn pushed_streams_arent_dropped_too_early() {
#[test]
fn recv_push_when_push_disabled_is_conn_error() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let mock = srv.assert_client_handshake()
@@ -163,7 +161,7 @@ fn recv_push_when_push_disabled_is_conn_error() {
#[test]
fn pending_push_promises_reset_when_dropped() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let srv = srv.assert_client_handshake()
@@ -206,7 +204,7 @@ fn pending_push_promises_reset_when_dropped() {
#[test]
fn recv_push_promise_over_max_header_list_size() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let srv = srv.assert_client_handshake()
@@ -257,7 +255,7 @@ fn recv_push_promise_over_max_header_list_size() {
#[test]
fn recv_invalid_push_promise_headers_is_stream_protocol_error() {
// Unsafe method or content length is stream protocol error
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let mock = srv.assert_client_handshake()
@@ -317,7 +315,7 @@ fn recv_push_promise_with_wrong_authority_is_stream_error() {
#[test]
fn recv_push_promise_skipped_stream_id() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let mock = srv.assert_client_handshake()
@@ -367,7 +365,7 @@ fn recv_push_promise_skipped_stream_id() {
#[test]
fn recv_push_promise_dup_stream_id() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let mock = srv.assert_client_handshake()

View File

@@ -1,7 +1,5 @@
#![deny(warnings)]
extern crate h2_support;
use h2_support::prelude::*;
const SETTINGS: &'static [u8] = &[0, 0, 0, 4, 0, 0, 0, 0, 0];
@@ -9,7 +7,7 @@ const SETTINGS_ACK: &'static [u8] = &[0, 0, 0, 4, 1, 0, 0, 0, 0];
#[test]
fn read_preface_in_multiple_frames() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let mock = mock_io::Builder::new()
.read(b"PRI * HTTP/2.0")
@@ -27,7 +25,7 @@ fn read_preface_in_multiple_frames() {
#[test]
fn server_builder_set_max_concurrent_streams() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, client) = mock::new();
let mut settings = frame::Settings::default();
@@ -77,7 +75,7 @@ fn server_builder_set_max_concurrent_streams() {
#[test]
fn serve_request() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, client) = mock::new();
let client = client
@@ -114,7 +112,7 @@ fn accept_with_pending_connections_after_socket_close() {}
#[test]
fn recv_invalid_authority() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, client) = mock::new();
let bad_auth = util::byte_str("not:a/good authority");
@@ -141,7 +139,7 @@ fn recv_invalid_authority() {
#[test]
fn recv_connection_header() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, client) = mock::new();
let req = |id, name, val| {
@@ -176,7 +174,7 @@ fn recv_connection_header() {
#[test]
fn sends_reset_cancel_when_req_body_is_dropped() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, client) = mock::new();
let client = client
@@ -209,7 +207,7 @@ fn sends_reset_cancel_when_req_body_is_dropped() {
#[test]
fn abrupt_shutdown() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, client) = mock::new();
let client = client
@@ -255,7 +253,7 @@ fn abrupt_shutdown() {
#[test]
fn graceful_shutdown() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, client) = mock::new();
let client = client
@@ -341,7 +339,7 @@ fn graceful_shutdown() {
#[test]
fn sends_reset_cancel_when_res_body_is_dropped() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, client) = mock::new();
let client = client
@@ -399,7 +397,7 @@ fn sends_reset_cancel_when_res_body_is_dropped() {
#[test]
fn too_big_headers_sends_431() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, client) = mock::new();
let client = client
@@ -436,7 +434,7 @@ fn too_big_headers_sends_431() {
#[test]
fn too_big_headers_sends_reset_after_431_if_not_eos() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, client) = mock::new();
let client = client
@@ -472,7 +470,7 @@ fn too_big_headers_sends_reset_after_431_if_not_eos() {
#[test]
fn poll_reset() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, client) = mock::new();
let client = client
@@ -518,7 +516,7 @@ fn poll_reset() {
#[test]
fn poll_reset_io_error() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, client) = mock::new();
let client = client
@@ -560,7 +558,7 @@ fn poll_reset_io_error() {
#[test]
fn poll_reset_after_send_response_is_user_error() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, client) = mock::new();
let client = client
@@ -614,7 +612,7 @@ fn poll_reset_after_send_response_is_user_error() {
fn server_error_on_unclean_shutdown() {
use std::io::Write;
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, mut client) = mock::new();
let srv = server::Builder::new()
@@ -628,7 +626,7 @@ fn server_error_on_unclean_shutdown() {
#[test]
fn request_without_authority() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, client) = mock::new();
let client = client

View File

@@ -1,9 +1,5 @@
#![deny(warnings)]
#[macro_use]
extern crate log;
extern crate h2_support;
use h2_support::prelude::*;
#[test]
@@ -30,7 +26,7 @@ fn send_recv_headers_only() {
.body(())
.unwrap();
info!("sending request");
log::info!("sending request");
let (response, _) = client.send_request(request, true).unwrap();
let resp = h2.run(response).unwrap();
@@ -72,7 +68,7 @@ fn send_recv_data() {
.body(())
.unwrap();
info!("sending request");
log::info!("sending request");
let (response, mut stream) = client.send_request(request, false).unwrap();
// Reserve send capacity
@@ -129,7 +125,7 @@ fn send_headers_recv_data_single_frame() {
.body(())
.unwrap();
info!("sending request");
log::info!("sending request");
let (response, _) = client.send_request(request, true).unwrap();
let resp = h2.run(response).unwrap();
@@ -153,7 +149,7 @@ fn send_headers_recv_data_single_frame() {
#[test]
fn closed_streams_are_released() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let h2 = client::handshake(io).unwrap().and_then(|(mut client, h2)| {
@@ -198,7 +194,7 @@ fn closed_streams_are_released() {
#[test]
fn errors_if_recv_frame_exceeds_max_frame_size() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, mut srv) = mock::new();
let h2 = client::handshake(io).unwrap().and_then(|(mut client, h2)| {
@@ -246,7 +242,7 @@ fn errors_if_recv_frame_exceeds_max_frame_size() {
#[test]
fn configure_max_frame_size() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, mut srv) = mock::new();
let h2 = client::Builder::new()
@@ -290,7 +286,7 @@ fn configure_max_frame_size() {
#[test]
fn recv_goaway_finishes_processed_streams() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let srv = srv.assert_client_handshake()
@@ -347,7 +343,7 @@ fn recv_goaway_finishes_processed_streams() {
#[test]
fn recv_next_stream_id_updated_by_malformed_headers() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, client) = mock::new();
@@ -385,7 +381,7 @@ fn recv_next_stream_id_updated_by_malformed_headers() {
#[test]
fn skipped_stream_ids_are_implicitly_closed() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let srv = srv
@@ -424,7 +420,7 @@ fn skipped_stream_ids_are_implicitly_closed() {
#[test]
fn send_rst_stream_allows_recv_data() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let srv = srv.assert_client_handshake()
@@ -470,7 +466,7 @@ fn send_rst_stream_allows_recv_data() {
#[test]
fn send_rst_stream_allows_recv_trailers() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let srv = srv.assert_client_handshake()
@@ -512,7 +508,7 @@ fn send_rst_stream_allows_recv_trailers() {
#[test]
fn rst_stream_expires() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let srv = srv.assert_client_handshake()
@@ -561,7 +557,7 @@ fn rst_stream_expires() {
#[test]
fn rst_stream_max() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let srv = srv.assert_client_handshake()
@@ -627,7 +623,7 @@ fn rst_stream_max() {
#[test]
fn reserved_state_recv_window_update() {
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let srv = srv.assert_client_handshake()
@@ -715,11 +711,11 @@ fn rst_while_closing() {
// Test to reproduce panic in issue #246 --- receipt of a RST_STREAM frame
// on a stream in the Half Closed (remote) state with a queued EOS causes
// a panic.
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
// Rendezvous when we've queued a trailers frame
let (tx, rx) = ::futures::sync::oneshot::channel();
let (tx, rx) = crate::futures::sync::oneshot::channel();
let srv = srv.assert_client_handshake()
.unwrap()
@@ -786,13 +782,13 @@ fn rst_with_buffered_data() {
// the data is fully flushed. Given that resetting a stream requires
// clearing all associated state for that stream, this test ensures that the
// buffered up frame is correctly handled.
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
// This allows the settings + headers frame through
let (io, srv) = mock::new_with_write_capacity(73);
// Synchronize the client / server on response
let (tx, rx) = ::futures::sync::oneshot::channel();
let (tx, rx) = crate::futures::sync::oneshot::channel();
let srv = srv.assert_client_handshake()
.unwrap()
@@ -851,13 +847,13 @@ fn err_with_buffered_data() {
// the data is fully flushed. Given that resetting a stream requires
// clearing all associated state for that stream, this test ensures that the
// buffered up frame is correctly handled.
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
// This allows the settings + headers frame through
let (io, srv) = mock::new_with_write_capacity(73);
// Synchronize the client / server on response
let (tx, rx) = ::futures::sync::oneshot::channel();
let (tx, rx) = crate::futures::sync::oneshot::channel();
let srv = srv.assert_client_handshake()
.unwrap()
@@ -915,13 +911,13 @@ fn send_err_with_buffered_data() {
// the data is fully flushed. Given that resetting a stream requires
// clearing all associated state for that stream, this test ensures that the
// buffered up frame is correctly handled.
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
// This allows the settings + headers frame through
let (io, srv) = mock::new_with_write_capacity(73);
// Synchronize the client / server on response
let (tx, rx) = ::futures::sync::oneshot::channel();
let (tx, rx) = crate::futures::sync::oneshot::channel();
let srv = srv.assert_client_handshake()
.unwrap()
@@ -961,7 +957,7 @@ fn send_err_with_buffered_data() {
stream.send_data(body.into(), true).unwrap();
// Hack to drive the connection, trying to flush data
::futures::future::lazy(|| {
crate::futures::future::lazy(|| {
conn.poll().unwrap();
Ok::<_, ()>(())
}).wait().unwrap();
@@ -987,7 +983,7 @@ fn send_err_with_buffered_data() {
#[test]
fn srv_window_update_on_lower_stream_id() {
// See https://github.com/hyperium/h2/issues/208
let _ = ::env_logger::try_init();
let _ = env_logger::try_init();
let (io, srv) = mock::new();
let srv = srv.assert_client_handshake()

View File

@@ -1,7 +1,3 @@
#[macro_use]
extern crate log;
extern crate h2_support;
use h2_support::prelude::*;
#[test]
@@ -31,7 +27,7 @@ fn recv_trailers_only() {
.body(())
.unwrap();
info!("sending request");
log::info!("sending request");
let (response, _) = client.send_request(request, true).unwrap();
let response = h2.run(response).unwrap();
@@ -79,7 +75,7 @@ fn send_trailers_immediately() {
.body(())
.unwrap();
info!("sending request");
log::info!("sending request");
let (response, mut stream) = client.send_request(request, false).unwrap();
let mut trailers = HeaderMap::new();

View File

@@ -3,6 +3,7 @@ name = "genfixture"
version = "0.1.0"
authors = ["Carl Lerche <me@carllerche.com>"]
publish = false
edition = "2018"
[dependencies]
walkdir = "1.0.0"

View File

@@ -1,6 +1,4 @@
extern crate walkdir;
use self::walkdir::WalkDir;
use walkdir::WalkDir;
use std::env;
use std::path::Path;

View File

@@ -3,5 +3,6 @@ name = "genhuff"
version = "0.1.0"
authors = ["Carl Lerche <me@carllerche.com>"]
publish = false
edition = "2018"
[dependencies]