refactor(lib): rename internal http module to proto

Sean McArthur
2017-09-28 18:28:44 -07:00
parent 217941cef2
commit 5027435791
19 changed files with 95 additions and 95 deletions

163
src/proto/body.rs Normal file

@@ -0,0 +1,163 @@
use bytes::Bytes;
use futures::{Poll, Stream};
use futures::sync::mpsc;
use tokio_proto;
use std::borrow::Cow;
use super::Chunk;
pub type TokioBody = tokio_proto::streaming::Body<Chunk, ::Error>;
/// A `Stream` for `Chunk`s used in requests and responses.
#[must_use = "streams do nothing unless polled"]
#[derive(Debug)]
pub struct Body(TokioBody);
impl Body {
/// Return an empty body stream
#[inline]
pub fn empty() -> Body {
Body(TokioBody::empty())
}
/// Return a body stream with an associated sender half
#[inline]
pub fn pair() -> (mpsc::Sender<Result<Chunk, ::Error>>, Body) {
let (tx, rx) = TokioBody::pair();
let rx = Body(rx);
(tx, rx)
}
}
impl Default for Body {
#[inline]
fn default() -> Body {
Body::empty()
}
}
impl Stream for Body {
type Item = Chunk;
type Error = ::Error;
#[inline]
fn poll(&mut self) -> Poll<Option<Chunk>, ::Error> {
self.0.poll()
}
}
impl From<Body> for tokio_proto::streaming::Body<Chunk, ::Error> {
#[inline]
fn from(b: Body) -> tokio_proto::streaming::Body<Chunk, ::Error> {
b.0
}
}
impl From<tokio_proto::streaming::Body<Chunk, ::Error>> for Body {
#[inline]
fn from(tokio_body: tokio_proto::streaming::Body<Chunk, ::Error>) -> Body {
Body(tokio_body)
}
}
impl From<mpsc::Receiver<Result<Chunk, ::Error>>> for Body {
#[inline]
fn from(src: mpsc::Receiver<Result<Chunk, ::Error>>) -> Body {
Body(src.into())
}
}
impl From<Chunk> for Body {
#[inline]
fn from (chunk: Chunk) -> Body {
Body(TokioBody::from(chunk))
}
}
impl From<Bytes> for Body {
#[inline]
fn from (bytes: Bytes) -> Body {
Body(TokioBody::from(Chunk::from(bytes)))
}
}
impl From<Vec<u8>> for Body {
#[inline]
fn from (vec: Vec<u8>) -> Body {
Body(TokioBody::from(Chunk::from(vec)))
}
}
impl From<&'static [u8]> for Body {
#[inline]
fn from (slice: &'static [u8]) -> Body {
Body(TokioBody::from(Chunk::from(slice)))
}
}
impl From<Cow<'static, [u8]>> for Body {
#[inline]
fn from (cow: Cow<'static, [u8]>) -> Body {
if let Cow::Borrowed(value) = cow {
Body::from(value)
} else {
Body::from(cow.into_owned())
}
}
}
impl From<String> for Body {
#[inline]
fn from (s: String) -> Body {
Body(TokioBody::from(Chunk::from(s.into_bytes())))
}
}
impl From<&'static str> for Body {
#[inline]
fn from(slice: &'static str) -> Body {
Body(TokioBody::from(Chunk::from(slice.as_bytes())))
}
}
impl From<Cow<'static, str>> for Body {
#[inline]
fn from(cow: Cow<'static, str>) -> Body {
if let Cow::Borrowed(value) = cow {
Body::from(value)
} else {
Body::from(cow.into_owned())
}
}
}
impl From<Option<Body>> for Body {
#[inline]
fn from (body: Option<Body>) -> Body {
body.unwrap_or_default()
}
}
fn _assert_send_sync() {
fn _assert_send<T: Send>() {}
fn _assert_sync<T: Sync>() {}
_assert_send::<Body>();
_assert_send::<Chunk>();
_assert_sync::<Chunk>();
}
#[test]
fn test_body_stream_concat() {
use futures::{Sink, Stream, Future};
let (tx, body) = Body::pair();
::std::thread::spawn(move || {
let tx = tx.send(Ok("hello ".into())).wait().unwrap();
tx.send(Ok("world".into())).wait().unwrap();
});
let total = body.concat2().wait().unwrap();
assert_eq!(total.as_ref(), b"hello world");
}
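// Editor's note: an illustrative sketch (not part of this commit) exercising the
// `From` conversions above; any byte-like value can be handed to hyper and
// turned into a `Body`.
#[test]
fn test_body_from_conversions_sketch() {
    let _a: Body = Body::from("hello world");             // &'static str
    let _b: Body = Body::from(vec![0u8, 1, 2, 3]);        // Vec<u8>
    let _c: Body = Body::from(Chunk::from("one chunk"));  // a single Chunk
    let _d: Body = Body::from(None::<Body>);              // Option<Body> -> empty body
    let _e: Body = Default::default();                    // also empty
}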

108
src/proto/chunk.rs Normal file

@@ -0,0 +1,108 @@
use std::fmt;
use bytes::Bytes;
/// A piece of a message body.
pub struct Chunk(Inner);
enum Inner {
Shared(Bytes),
}
impl From<Vec<u8>> for Chunk {
#[inline]
fn from(v: Vec<u8>) -> Chunk {
Chunk::from(Bytes::from(v))
}
}
impl From<&'static [u8]> for Chunk {
#[inline]
fn from(slice: &'static [u8]) -> Chunk {
Chunk::from(Bytes::from_static(slice))
}
}
impl From<String> for Chunk {
#[inline]
fn from(s: String) -> Chunk {
s.into_bytes().into()
}
}
impl From<&'static str> for Chunk {
#[inline]
fn from(slice: &'static str) -> Chunk {
slice.as_bytes().into()
}
}
impl From<Bytes> for Chunk {
#[inline]
fn from(mem: Bytes) -> Chunk {
Chunk(Inner::Shared(mem))
}
}
impl From<Chunk> for Bytes {
#[inline]
fn from(chunk: Chunk) -> Bytes {
match chunk.0 {
Inner::Shared(bytes) => bytes,
}
}
}
impl ::std::ops::Deref for Chunk {
type Target = [u8];
#[inline]
fn deref(&self) -> &Self::Target {
self.as_ref()
}
}
impl AsRef<[u8]> for Chunk {
#[inline]
fn as_ref(&self) -> &[u8] {
match self.0 {
Inner::Shared(ref slice) => slice,
}
}
}
impl fmt::Debug for Chunk {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(self.as_ref(), f)
}
}
impl Default for Chunk {
#[inline]
fn default() -> Chunk {
Chunk(Inner::Shared(Bytes::new()))
}
}
impl IntoIterator for Chunk {
type Item = u8;
type IntoIter = <Bytes as IntoIterator>::IntoIter;
#[inline]
fn into_iter(self) -> Self::IntoIter {
match self.0 {
Inner::Shared(bytes) => bytes.into_iter(),
}
}
}
impl Extend<u8> for Chunk {
#[inline]
fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item=u8> {
match self.0 {
Inner::Shared(ref mut bytes) => bytes.extend(iter)
}
}
}
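// Editor's note: a brief usage sketch (not part of this commit) showing how
// `Chunk` converts from common byte-ish types, exposes its bytes, and converts
// back into `Bytes`.
#[test]
fn test_chunk_conversions_sketch() {
    let chunk = Chunk::from("hello");
    assert_eq!(chunk.as_ref(), b"hello");                   // AsRef<[u8]> / Deref
    assert_eq!(chunk.into_iter().next(), Some(b'h'));       // IntoIterator<Item = u8>
    let bytes: Bytes = Chunk::from(vec![1u8, 2, 3]).into(); // Chunk -> Bytes
    assert_eq!(bytes.len(), 3);
}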

974
src/proto/conn.rs Normal file

@@ -0,0 +1,974 @@
use std::fmt;
use std::io::{self, Write};
use std::marker::PhantomData;
use futures::{Poll, Async, AsyncSink, Stream, Sink, StartSend};
use futures::task::Task;
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_proto::streaming::pipeline::{Frame, Transport};
use proto::{Http1Transaction};
use super::io::{Cursor, Buffered};
use super::h1::{Encoder, Decoder};
use method::Method;
use version::HttpVersion;
/// This handles a connection, which will have been established over an
/// `AsyncRead + AsyncWrite` (like a socket), and will likely include multiple
/// `Transaction`s over HTTP.
///
/// The connection determines when a message begins and ends, as well as
/// whether this connection can be kept alive for another message once the
/// current one is complete.
pub struct Conn<I, B, T, K = KA> {
io: Buffered<I>,
state: State<B, K>,
_marker: PhantomData<T>
}
impl<I, B, T, K> Conn<I, B, T, K>
where I: AsyncRead + AsyncWrite,
B: AsRef<[u8]>,
T: Http1Transaction,
K: KeepAlive
{
pub fn new(io: I, keep_alive: K) -> Conn<I, B, T, K> {
Conn {
io: Buffered::new(io),
state: State {
keep_alive: keep_alive,
method: None,
read_task: None,
reading: Reading::Init,
writing: Writing::Init,
},
_marker: PhantomData,
}
}
pub fn set_flush_pipeline(&mut self, enabled: bool) {
self.io.set_flush_pipeline(enabled);
}
fn poll2(&mut self) -> Poll<Option<Frame<super::MessageHead<T::Incoming>, super::Chunk, ::Error>>, io::Error> {
trace!("Conn::poll()");
loop {
if self.is_read_closed() {
trace!("Conn::poll when closed");
return Ok(Async::Ready(None));
} else if self.can_read_head() {
return self.read_head();
} else if self.can_write_continue() {
try_nb!(self.flush());
} else if self.can_read_body() {
return self.read_body()
.map(|async| async.map(|chunk| Some(Frame::Body {
chunk: chunk
})))
.or_else(|err| {
self.state.close_read();
Ok(Async::Ready(Some(Frame::Error { error: err.into() })))
});
} else {
trace!("poll when on keep-alive");
self.maybe_park_read();
return Ok(Async::NotReady);
}
}
}
fn is_read_closed(&self) -> bool {
self.state.is_read_closed()
}
#[allow(unused)]
fn is_write_closed(&self) -> bool {
self.state.is_write_closed()
}
fn can_read_head(&self) -> bool {
match self.state.reading {
Reading::Init => true,
_ => false,
}
}
fn can_write_continue(&self) -> bool {
match self.state.writing {
Writing::Continue(..) => true,
_ => false,
}
}
fn can_read_body(&self) -> bool {
match self.state.reading {
Reading::Body(..) => true,
_ => false,
}
}
fn read_head(&mut self) -> Poll<Option<Frame<super::MessageHead<T::Incoming>, super::Chunk, ::Error>>, io::Error> {
debug_assert!(self.can_read_head());
trace!("Conn::read_head");
let (version, head) = match self.io.parse::<T>() {
Ok(Async::Ready(head)) => (head.version, head),
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(e) => {
let must_respond_with_error = !self.state.is_idle();
self.state.close_read();
self.io.consume_leading_lines();
let was_mid_parse = !self.io.read_buf().is_empty();
return if was_mid_parse || must_respond_with_error {
debug!("parse error ({}) with {} bytes", e, self.io.read_buf().len());
Ok(Async::Ready(Some(Frame::Error { error: e })))
} else {
debug!("read eof");
Ok(Async::Ready(None))
};
}
};
match version {
HttpVersion::Http10 | HttpVersion::Http11 => {
let decoder = match T::decoder(&head, &mut self.state.method) {
Ok(d) => d,
Err(e) => {
debug!("decoder error = {:?}", e);
self.state.close_read();
return Ok(Async::Ready(Some(Frame::Error { error: e })));
}
};
self.state.busy();
if head.expecting_continue() {
let msg = b"HTTP/1.1 100 Continue\r\n\r\n";
self.state.writing = Writing::Continue(Cursor::new(msg));
}
let wants_keep_alive = head.should_keep_alive();
self.state.keep_alive &= wants_keep_alive;
let (body, reading) = if decoder.is_eof() {
(false, Reading::KeepAlive)
} else {
(true, Reading::Body(decoder))
};
self.state.reading = reading;
Ok(Async::Ready(Some(Frame::Message { message: head, body: body })))
},
_ => {
error!("unimplemented HTTP Version = {:?}", version);
self.state.close_read();
Ok(Async::Ready(Some(Frame::Error { error: ::Error::Version })))
}
}
}
fn read_body(&mut self) -> Poll<Option<super::Chunk>, io::Error> {
debug_assert!(self.can_read_body());
trace!("Conn::read_body");
let (reading, ret) = match self.state.reading {
Reading::Body(ref mut decoder) => {
let slice = try_ready!(decoder.decode(&mut self.io));
if !slice.is_empty() {
return Ok(Async::Ready(Some(super::Chunk::from(slice))));
} else if decoder.is_eof() {
(Reading::KeepAlive, Ok(Async::Ready(None)))
} else {
(Reading::Closed, Ok(Async::Ready(None)))
}
},
Reading::Init | Reading::KeepAlive | Reading::Closed => unreachable!()
};
self.state.reading = reading;
ret
}
fn maybe_park_read(&mut self) {
if !self.io.is_read_blocked() {
// the Io object is ready to read, which means it will never alert
// us that it is ready until we drain it. However, we're currently
// finished reading, so we need to park the task to be able to
// wake back up later when more reading should happen.
let park = self.state.read_task.as_ref()
.map(|t| !t.will_notify_current())
.unwrap_or(true);
if park {
trace!("parking current task");
self.state.read_task = Some(::futures::task::current());
}
}
}
fn maybe_notify(&mut self) {
// it's possible that we returned NotReady from poll() without having
// exhausted the underlying Io. We would have done this when we
// determined we couldn't keep reading until we knew how writing
// would finish.
//
// When writing finishes, we need to wake the task up in case there
// is more reading that can be done, to start a new message.
let wants_read = match self.state.reading {
Reading::Body(..) |
Reading::KeepAlive => return,
Reading::Init => true,
Reading::Closed => false,
};
match self.state.writing {
Writing::Continue(..) |
Writing::Body(..) |
Writing::Ending(..) => return,
Writing::Init |
Writing::KeepAlive |
Writing::Closed => (),
}
if !self.io.is_read_blocked() {
if wants_read && self.io.read_buf().is_empty() {
match self.io.read_from_io() {
Ok(Async::Ready(_)) => (),
Ok(Async::NotReady) => {
trace!("maybe_notify; read_from_io blocked");
return
},
Err(e) => {
trace!("maybe_notify read_from_io error: {}", e);
self.state.close();
}
}
}
if let Some(ref task) = self.state.read_task {
task.notify();
}
}
}
fn try_keep_alive(&mut self) {
self.state.try_keep_alive();
self.maybe_notify();
}
fn can_write_head(&self) -> bool {
match self.state.writing {
Writing::Continue(..) | Writing::Init => true,
_ => false
}
}
fn can_write_body(&self) -> bool {
match self.state.writing {
Writing::Body(..) => true,
Writing::Continue(..) |
Writing::Init |
Writing::Ending(..) |
Writing::KeepAlive |
Writing::Closed => false,
}
}
fn has_queued_body(&self) -> bool {
match self.state.writing {
Writing::Body(_, Some(_)) => true,
_ => false,
}
}
fn write_head(&mut self, head: super::MessageHead<T::Outgoing>, body: bool) {
debug_assert!(self.can_write_head());
let wants_keep_alive = head.should_keep_alive();
self.state.keep_alive &= wants_keep_alive;
let buf = self.io.write_buf_mut();
// if a 100-continue has started but not finished sending, tack the
// remainder on to the start of the buffer.
if let Writing::Continue(ref pending) = self.state.writing {
if pending.has_started() {
buf.extend_from_slice(pending.buf());
}
}
let encoder = T::encode(head, body, &mut self.state.method, buf);
self.state.writing = if !encoder.is_eof() {
Writing::Body(encoder, None)
} else {
Writing::KeepAlive
};
}
fn write_body(&mut self, chunk: Option<B>) -> StartSend<Option<B>, io::Error> {
debug_assert!(self.can_write_body());
if self.has_queued_body() {
try!(self.flush());
}
let state = match self.state.writing {
Writing::Body(ref mut encoder, ref mut queued) => {
if queued.is_some() {
return Ok(AsyncSink::NotReady(chunk));
}
if let Some(chunk) = chunk {
if chunk.as_ref().is_empty() {
return Ok(AsyncSink::Ready);
}
let mut cursor = Cursor::new(chunk);
match encoder.encode(&mut self.io, cursor.buf()) {
Ok(n) => {
cursor.consume(n);
if !cursor.is_written() {
trace!("Conn::start_send frame not written, queued");
*queued = Some(cursor);
}
},
Err(e) => match e.kind() {
io::ErrorKind::WouldBlock => {
trace!("Conn::start_send frame not written, queued");
*queued = Some(cursor);
},
_ => return Err(e)
}
}
if encoder.is_eof() {
Writing::KeepAlive
} else {
return Ok(AsyncSink::Ready);
}
} else {
// end of the stream, so try to write the encoder's EOF terminator
match encoder.eof() {
Ok(Some(end)) => Writing::Ending(Cursor::new(end)),
Ok(None) => Writing::KeepAlive,
Err(_not_eof) => Writing::Closed,
}
}
},
_ => unreachable!(),
};
self.state.writing = state;
Ok(AsyncSink::Ready)
}
fn write_queued(&mut self) -> Poll<(), io::Error> {
trace!("Conn::write_queued()");
let state = match self.state.writing {
Writing::Continue(ref mut queued) => {
let n = self.io.buffer(queued.buf());
queued.consume(n);
if queued.is_written() {
Writing::Init
} else {
return Ok(Async::NotReady);
}
}
Writing::Body(ref mut encoder, ref mut queued) => {
let complete = if let Some(chunk) = queued.as_mut() {
let n = try_nb!(encoder.encode(&mut self.io, chunk.buf()));
chunk.consume(n);
chunk.is_written()
} else {
true
};
trace!("Conn::write_queued complete = {}", complete);
return if complete {
*queued = None;
Ok(Async::Ready(()))
} else {
Ok(Async::NotReady)
};
},
Writing::Ending(ref mut ending) => {
let n = self.io.buffer(ending.buf());
ending.consume(n);
if ending.is_written() {
Writing::KeepAlive
} else {
return Ok(Async::NotReady);
}
},
_ => return Ok(Async::Ready(())),
};
self.state.writing = state;
Ok(Async::Ready(()))
}
fn flush(&mut self) -> Poll<(), io::Error> {
loop {
let queue_finished = try!(self.write_queued()).is_ready();
try_nb!(self.io.flush());
if queue_finished {
break;
}
}
self.try_keep_alive();
trace!("flushed {:?}", self.state);
Ok(Async::Ready(()))
}
}
impl<I, B, T, K> Stream for Conn<I, B, T, K>
where I: AsyncRead + AsyncWrite,
B: AsRef<[u8]>,
T: Http1Transaction,
K: KeepAlive,
T::Outgoing: fmt::Debug {
type Item = Frame<super::MessageHead<T::Incoming>, super::Chunk, ::Error>;
type Error = io::Error;
#[inline]
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
self.poll2().map_err(|err| {
debug!("poll error: {}", err);
err
})
}
}
impl<I, B, T, K> Sink for Conn<I, B, T, K>
where I: AsyncRead + AsyncWrite,
B: AsRef<[u8]>,
T: Http1Transaction,
K: KeepAlive,
T::Outgoing: fmt::Debug {
type SinkItem = Frame<super::MessageHead<T::Outgoing>, B, ::Error>;
type SinkError = io::Error;
#[inline]
fn start_send(&mut self, frame: Self::SinkItem) -> StartSend<Self::SinkItem, Self::SinkError> {
trace!("Conn::start_send( frame={:?} )", DebugFrame(&frame));
let frame: Self::SinkItem = match frame {
Frame::Message { message: head, body } => {
if self.can_write_head() {
self.write_head(head, body);
return Ok(AsyncSink::Ready);
} else {
Frame::Message { message: head, body: body }
}
},
Frame::Body { chunk } => {
if self.can_write_body() {
return self.write_body(chunk)
.map(|async| {
match async {
AsyncSink::Ready => AsyncSink::Ready,
AsyncSink::NotReady(chunk) => AsyncSink::NotReady(Frame::Body {
chunk: chunk,
})
}
});
// An empty chunk (`None` or `Some([])`) is still accepted here, even
// though the body can no longer be written.
} else if chunk.as_ref().map(|c| c.as_ref().len()).unwrap_or(0) == 0 {
return Ok(AsyncSink::Ready);
} else {
Frame::Body { chunk: chunk }
}
},
Frame::Error { error } => {
debug!("received error, closing: {:?}", error);
self.state.close();
return Ok(AsyncSink::Ready);
},
};
error!("writing illegal frame; state={:?}, frame={:?}", self.state.writing, DebugFrame(&frame));
Err(io::Error::new(io::ErrorKind::InvalidInput, "illegal frame"))
}
#[inline]
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
trace!("Conn::poll_complete()");
self.flush().map_err(|err| {
debug!("error writing: {}", err);
err
})
}
#[inline]
fn close(&mut self) -> Poll<(), Self::SinkError> {
try_ready!(self.poll_complete());
self.io.io_mut().shutdown().map_err(|err| {
debug!("error closing: {}", err);
err
})
}
}
impl<I, B, T, K> Transport for Conn<I, B, T, K>
where I: AsyncRead + AsyncWrite + 'static,
B: AsRef<[u8]> + 'static,
T: Http1Transaction + 'static,
K: KeepAlive + 'static,
T::Outgoing: fmt::Debug {}
impl<I, B: AsRef<[u8]>, T, K: fmt::Debug> fmt::Debug for Conn<I, B, T, K> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Conn")
.field("state", &self.state)
.field("io", &self.io)
.finish()
}
}
struct State<B, K> {
keep_alive: K,
method: Option<Method>,
read_task: Option<Task>,
reading: Reading,
writing: Writing<B>,
}
#[derive(Debug)]
enum Reading {
Init,
Body(Decoder),
KeepAlive,
Closed,
}
enum Writing<B> {
Continue(Cursor<&'static [u8]>),
Init,
Body(Encoder, Option<Cursor<B>>),
Ending(Cursor<&'static [u8]>),
KeepAlive,
Closed,
}
impl<B: AsRef<[u8]>, K: fmt::Debug> fmt::Debug for State<B, K> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("State")
.field("reading", &self.reading)
.field("writing", &self.writing)
.field("keep_alive", &self.keep_alive)
.field("method", &self.method)
.field("read_task", &self.read_task)
.finish()
}
}
impl<B: AsRef<[u8]>> fmt::Debug for Writing<B> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Writing::Continue(ref buf) => f.debug_tuple("Continue")
.field(buf)
.finish(),
Writing::Init => f.write_str("Init"),
Writing::Body(ref enc, ref queued) => f.debug_tuple("Body")
.field(enc)
.field(queued)
.finish(),
Writing::Ending(ref ending) => f.debug_tuple("Ending")
.field(ending)
.finish(),
Writing::KeepAlive => f.write_str("KeepAlive"),
Writing::Closed => f.write_str("Closed"),
}
}
}
impl ::std::ops::BitAndAssign<bool> for KA {
fn bitand_assign(&mut self, enabled: bool) {
if !enabled {
*self = KA::Disabled;
}
}
}
pub trait KeepAlive: fmt::Debug + ::std::ops::BitAndAssign<bool> {
fn busy(&mut self);
fn disable(&mut self);
fn idle(&mut self);
fn status(&self) -> KA;
}
#[derive(Clone, Copy, Debug)]
pub enum KA {
Idle,
Busy,
Disabled,
}
impl Default for KA {
fn default() -> KA {
KA::Busy
}
}
impl KeepAlive for KA {
fn idle(&mut self) {
*self = KA::Idle;
}
fn busy(&mut self) {
*self = KA::Busy;
}
fn disable(&mut self) {
*self = KA::Disabled;
}
fn status(&self) -> KA {
*self
}
}
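// Editor's note: a minimal sketch (not part of this commit) of how the
// keep-alive flag combines with what each message requested: `&=` can only
// ever disable, mirroring `self.state.keep_alive &= wants_keep_alive` above.
#[test]
fn test_ka_bitand_assign_sketch() {
    let mut ka = KA::default(); // starts out Busy
    ka &= true;                 // message allows keep-alive: unchanged
    ka &= false;                // `Connection: close` seen: disabled for good
    match ka.status() {
        KA::Disabled => (),
        other => panic!("keep-alive should be disabled, got {:?}", other),
    }
}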
impl<B, K: KeepAlive> State<B, K> {
fn close(&mut self) {
trace!("State::close()");
self.reading = Reading::Closed;
self.writing = Writing::Closed;
self.keep_alive.disable();
}
fn close_read(&mut self) {
trace!("State::close_read()");
self.reading = Reading::Closed;
self.read_task = None;
self.keep_alive.disable();
}
fn try_keep_alive(&mut self) {
match (&self.reading, &self.writing) {
(&Reading::KeepAlive, &Writing::KeepAlive) => {
if let KA::Busy = self.keep_alive.status() {
self.idle();
} else {
self.close();
}
},
(&Reading::Closed, &Writing::KeepAlive) |
(&Reading::KeepAlive, &Writing::Closed) => {
self.close()
}
_ => ()
}
}
fn is_idle(&self) -> bool {
if let KA::Idle = self.keep_alive.status() {
true
} else {
false
}
}
fn busy(&mut self) {
if let KA::Disabled = self.keep_alive.status() {
return;
}
self.keep_alive.busy();
}
fn idle(&mut self) {
self.method = None;
self.reading = Reading::Init;
self.writing = Writing::Init;
self.keep_alive.idle();
}
fn is_read_closed(&self) -> bool {
match self.reading {
Reading::Closed => true,
_ => false
}
}
#[allow(unused)]
fn is_write_closed(&self) -> bool {
match self.writing {
Writing::Closed => true,
_ => false
}
}
}
// DebugFrame is a simple Debug implementation that allows us to dump a frame
// into the logs without logging the entirety of the body bytes.
struct DebugFrame<'a, T: fmt::Debug + 'a, B: AsRef<[u8]> + 'a>(&'a Frame<super::MessageHead<T>, B, ::Error>);
impl<'a, T: fmt::Debug + 'a, B: AsRef<[u8]> + 'a> fmt::Debug for DebugFrame<'a, T, B> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self.0 {
Frame::Message { ref body, .. } => {
f.debug_struct("Message")
.field("body", body)
.finish()
},
Frame::Body { chunk: Some(ref chunk) } => {
f.debug_struct("Body")
.field("bytes", &chunk.as_ref().len())
.finish()
},
Frame::Body { chunk: None } => {
f.debug_struct("Body")
.field("bytes", &None::<()>)
.finish()
},
Frame::Error { ref error } => {
f.debug_struct("Error")
.field("error", error)
.finish()
}
}
}
}
#[cfg(test)]
mod tests {
use futures::{Async, Future, Stream, Sink};
use futures::future;
use tokio_proto::streaming::pipeline::Frame;
use proto::{self, MessageHead, ServerTransaction};
use super::super::h1::Encoder;
use mock::AsyncIo;
use super::{Conn, Reading, Writing};
use ::uri::Uri;
use std::str::FromStr;
impl<T> Writing<T> {
fn is_queued(&self) -> bool {
match *self {
Writing::Body(_, Some(_)) => true,
_ => false,
}
}
}
#[test]
fn test_conn_init_read() {
let good_message = b"GET / HTTP/1.1\r\n\r\n".to_vec();
let len = good_message.len();
let io = AsyncIo::new_buf(good_message, len);
let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io, Default::default());
match conn.poll().unwrap() {
Async::Ready(Some(Frame::Message { message, body: false })) => {
assert_eq!(message, MessageHead {
subject: ::proto::RequestLine(::Get, Uri::from_str("/").unwrap()),
.. MessageHead::default()
})
},
f => panic!("frame is not Frame::Message: {:?}", f)
}
}
#[test]
fn test_conn_parse_partial() {
let _: Result<(), ()> = future::lazy(|| {
let good_message = b"GET / HTTP/1.1\r\nHost: foo.bar\r\n\r\n".to_vec();
let io = AsyncIo::new_buf(good_message, 10);
let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io, Default::default());
assert!(conn.poll().unwrap().is_not_ready());
conn.io.io_mut().block_in(50);
let async = conn.poll().unwrap();
assert!(async.is_ready());
match async {
Async::Ready(Some(Frame::Message { .. })) => (),
f => panic!("frame is not Message: {:?}", f),
}
Ok(())
}).wait();
}
#[test]
fn test_conn_init_read_eof_idle() {
let io = AsyncIo::new_buf(vec![], 1);
let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io, Default::default());
conn.state.idle();
match conn.poll().unwrap() {
Async::Ready(None) => {},
other => panic!("frame is not None: {:?}", other)
}
}
#[test]
fn test_conn_init_read_eof_idle_partial_parse() {
let io = AsyncIo::new_buf(b"GET / HTTP/1.1".to_vec(), 100);
let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io, Default::default());
conn.state.idle();
match conn.poll().unwrap() {
Async::Ready(Some(Frame::Error { .. })) => {},
other => panic!("frame is not Error: {:?}", other)
}
}
#[test]
fn test_conn_init_read_eof_busy() {
let io = AsyncIo::new_buf(vec![], 1);
let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io, Default::default());
conn.state.busy();
match conn.poll().unwrap() {
Async::Ready(Some(Frame::Error { .. })) => {},
other => panic!("frame is not Error: {:?}", other)
}
}
#[test]
fn test_conn_closed_read() {
let io = AsyncIo::new_buf(vec![], 0);
let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io, Default::default());
conn.state.close();
match conn.poll().unwrap() {
Async::Ready(None) => {},
other => panic!("frame is not None: {:?}", other)
}
}
#[test]
fn test_conn_body_write_length() {
extern crate pretty_env_logger;
let _ = pretty_env_logger::init();
let _: Result<(), ()> = future::lazy(|| {
let io = AsyncIo::new_buf(vec![], 0);
let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io, Default::default());
let max = ::proto::io::MAX_BUFFER_SIZE + 4096;
conn.state.writing = Writing::Body(Encoder::length((max * 2) as u64), None);
assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'a'; 1024 * 8].into()) }).unwrap().is_ready());
assert!(!conn.state.writing.is_queued());
assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'b'; max].into()) }).unwrap().is_ready());
assert!(conn.state.writing.is_queued());
assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'b'; 1024 * 8].into()) }).unwrap().is_not_ready());
conn.io.io_mut().block_in(1024 * 3);
assert!(conn.poll_complete().unwrap().is_not_ready());
conn.io.io_mut().block_in(1024 * 3);
assert!(conn.poll_complete().unwrap().is_not_ready());
conn.io.io_mut().block_in(max * 2);
assert!(conn.poll_complete().unwrap().is_ready());
assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'c'; 1024 * 8].into()) }).unwrap().is_ready());
Ok(())
}).wait();
}
#[test]
fn test_conn_body_write_chunked() {
let _: Result<(), ()> = future::lazy(|| {
let io = AsyncIo::new_buf(vec![], 4096);
let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io, Default::default());
conn.state.writing = Writing::Body(Encoder::chunked(), None);
assert!(conn.start_send(Frame::Body { chunk: Some("headers".into()) }).unwrap().is_ready());
assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'x'; 8192].into()) }).unwrap().is_ready());
Ok(())
}).wait();
}
#[test]
fn test_conn_body_flush() {
let _: Result<(), ()> = future::lazy(|| {
let io = AsyncIo::new_buf(vec![], 1024 * 1024 * 5);
let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io, Default::default());
conn.state.writing = Writing::Body(Encoder::length(1024 * 1024), None);
assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'a'; 1024 * 1024].into()) }).unwrap().is_ready());
assert!(conn.state.writing.is_queued());
assert!(conn.poll_complete().unwrap().is_ready());
assert!(!conn.state.writing.is_queued());
assert!(conn.io.io_mut().flushed());
Ok(())
}).wait();
}
#[test]
fn test_conn_parking() {
use std::sync::Arc;
use futures::executor::Notify;
use futures::executor::NotifyHandle;
struct Car {
permit: bool,
}
impl Notify for Car {
fn notify(&self, _id: usize) {
assert!(self.permit, "unparked without permit");
}
}
fn car(permit: bool) -> NotifyHandle {
Arc::new(Car {
permit: permit,
}).into()
}
// test that once writing is done, unparks
let f = future::lazy(|| {
let io = AsyncIo::new_buf(vec![], 4096);
let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io, Default::default());
conn.state.reading = Reading::KeepAlive;
assert!(conn.poll().unwrap().is_not_ready());
conn.state.writing = Writing::KeepAlive;
assert!(conn.poll_complete().unwrap().is_ready());
Ok::<(), ()>(())
});
::futures::executor::spawn(f).poll_future_notify(&car(true), 0).unwrap();
// test that flushing when not waiting on read doesn't unpark
let f = future::lazy(|| {
let io = AsyncIo::new_buf(vec![], 4096);
let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io, Default::default());
conn.state.writing = Writing::KeepAlive;
assert!(conn.poll_complete().unwrap().is_ready());
Ok::<(), ()>(())
});
::futures::executor::spawn(f).poll_future_notify(&car(false), 0).unwrap();
// test that flushing and writing isn't done doesn't unpark
let f = future::lazy(|| {
let io = AsyncIo::new_buf(vec![], 4096);
let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io, Default::default());
conn.state.reading = Reading::KeepAlive;
assert!(conn.poll().unwrap().is_not_ready());
conn.state.writing = Writing::Body(Encoder::length(5_000), None);
assert!(conn.poll_complete().unwrap().is_ready());
Ok::<(), ()>(())
});
::futures::executor::spawn(f).poll_future_notify(&car(false), 0).unwrap();
}
#[test]
fn test_conn_closed_write() {
let io = AsyncIo::new_buf(vec![], 0);
let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io, Default::default());
conn.state.close();
match conn.start_send(Frame::Body { chunk: Some(b"foobar".to_vec().into()) }) {
Err(_e) => {},
other => panic!("did not return Err: {:?}", other)
}
assert!(conn.state.is_write_closed());
}
#[test]
fn test_conn_write_empty_chunk() {
let io = AsyncIo::new_buf(vec![], 0);
let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io, Default::default());
conn.state.writing = Writing::KeepAlive;
assert!(conn.start_send(Frame::Body { chunk: None }).unwrap().is_ready());
assert!(conn.start_send(Frame::Body { chunk: Some(Vec::new().into()) }).unwrap().is_ready());
conn.start_send(Frame::Body { chunk: Some(vec![b'a'].into()) }).unwrap_err();
}
}

59
src/proto/h1/date.rs Normal file

@@ -0,0 +1,59 @@
use std::cell::RefCell;
use std::fmt::{self, Write};
use std::str;
use time::{self, Duration};
// "Sun, 06 Nov 1994 08:49:37 GMT".len()
pub const DATE_VALUE_LENGTH: usize = 29;
pub fn extend(dst: &mut Vec<u8>) {
CACHED.with(|cache| {
let mut cache = cache.borrow_mut();
let now = time::get_time();
if now > cache.next_update {
cache.update(now);
}
dst.extend_from_slice(cache.buffer());
})
}
struct CachedDate {
bytes: [u8; DATE_VALUE_LENGTH],
pos: usize,
next_update: time::Timespec,
}
thread_local!(static CACHED: RefCell<CachedDate> = RefCell::new(CachedDate {
bytes: [0; DATE_VALUE_LENGTH],
pos: 0,
next_update: time::Timespec::new(0, 0),
}));
impl CachedDate {
fn buffer(&self) -> &[u8] {
&self.bytes[..]
}
fn update(&mut self, now: time::Timespec) {
self.pos = 0;
write!(self, "{}", time::at_utc(now).rfc822()).unwrap();
assert!(self.pos == DATE_VALUE_LENGTH);
self.next_update = now + Duration::seconds(1);
self.next_update.nsec = 0;
}
}
impl fmt::Write for CachedDate {
fn write_str(&mut self, s: &str) -> fmt::Result {
let len = s.len();
self.bytes[self.pos..self.pos + len].copy_from_slice(s.as_bytes());
self.pos += len;
Ok(())
}
}
#[test]
fn test_date_len() {
assert_eq!(DATE_VALUE_LENGTH, "Sun, 06 Nov 1994 08:49:37 GMT".len());
}
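// Editor's note: a usage sketch (not part of this commit); this mirrors how the
// response encoder appends a `Date` header, re-rendering the cached timestamp
// at most once per second.
#[test]
fn test_date_extend_sketch() {
    let mut dst = Vec::with_capacity(DATE_VALUE_LENGTH + 8);
    dst.extend_from_slice(b"Date: ");
    extend(&mut dst); // appends e.g. "Sun, 06 Nov 1994 08:49:37 GMT"
    dst.extend_from_slice(b"\r\n");
    assert_eq!(dst.len(), b"Date: \r\n".len() + DATE_VALUE_LENGTH);
}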

499
src/proto/h1/decode.rs Normal file

@@ -0,0 +1,499 @@
use std::usize;
use std::io;
use futures::{Async, Poll};
use bytes::Bytes;
use proto::io::MemRead;
use self::Kind::{Length, Chunked, Eof};
/// Decoders to handle different Transfer-Encodings.
///
/// If a message body does not include a Transfer-Encoding, it *should*
/// include a Content-Length header.
#[derive(Debug, Clone, PartialEq)]
pub struct Decoder {
kind: Kind,
}
impl Decoder {
pub fn length(x: u64) -> Decoder {
Decoder { kind: Kind::Length(x) }
}
pub fn chunked() -> Decoder {
Decoder { kind: Kind::Chunked(ChunkedState::Size, 0) }
}
pub fn eof() -> Decoder {
Decoder { kind: Kind::Eof(false) }
}
}
#[derive(Debug, Clone, PartialEq)]
enum Kind {
/// A Reader used when a Content-Length header is passed with a positive integer.
Length(u64),
/// A Reader used when Transfer-Encoding is `chunked`.
Chunked(ChunkedState, u64),
/// A Reader used for responses that don't indicate a length or chunked
/// encoding.
///
/// Note: This should only be used for `Response`s. It is illegal for a
/// `Request` to be made with both `Content-Length` and
/// `Transfer-Encoding: chunked` missing, as explained in the spec:
///
/// > If a Transfer-Encoding header field is present in a response and
/// > the chunked transfer coding is not the final encoding, the
/// > message body length is determined by reading the connection until
/// > it is closed by the server. If a Transfer-Encoding header field
/// > is present in a request and the chunked transfer coding is not
/// > the final encoding, the message body length cannot be determined
/// > reliably; the server MUST respond with the 400 (Bad Request)
/// > status code and then close the connection.
Eof(bool),
}
#[derive(Debug, PartialEq, Clone)]
enum ChunkedState {
Size,
SizeLws,
Extension,
SizeLf,
Body,
BodyCr,
BodyLf,
EndCr,
EndLf,
End,
}
impl Decoder {
pub fn is_eof(&self) -> bool {
trace!("is_eof? {:?}", self);
match self.kind {
Length(0) |
Chunked(ChunkedState::End, _) |
Eof(true) => true,
_ => false,
}
}
}
impl Decoder {
pub fn decode<R: MemRead>(&mut self, body: &mut R) -> Poll<Bytes, io::Error> {
match self.kind {
Length(ref mut remaining) => {
trace!("Sized read, remaining={:?}", remaining);
if *remaining == 0 {
Ok(Async::Ready(Bytes::new()))
} else {
let to_read = *remaining as usize;
let buf = try_ready!(body.read_mem(to_read));
let num = buf.as_ref().len() as u64;
trace!("Length read: {}", num);
if num > *remaining {
*remaining = 0;
} else if num == 0 {
return Err(io::Error::new(io::ErrorKind::Other, "early eof"));
} else {
*remaining -= num;
}
Ok(Async::Ready(buf))
}
}
Chunked(ref mut state, ref mut size) => {
loop {
let mut buf = None;
// advances the chunked state
*state = try_ready!(state.step(body, size, &mut buf));
if *state == ChunkedState::End {
trace!("end of chunked");
return Ok(Async::Ready(Bytes::new()));
}
if let Some(buf) = buf {
return Ok(Async::Ready(buf));
}
}
}
Eof(ref mut is_eof) => {
if *is_eof {
Ok(Async::Ready(Bytes::new()))
} else {
// 8192 chosen because it's about 2 packets; there probably
// won't be that much available, so don't have MemRead
// implementations allocate buffers that are too big
let slice = try_ready!(body.read_mem(8192));
*is_eof = slice.is_empty();
Ok(Async::Ready(slice))
}
}
}
}
}
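// Editor's note: an illustrative sketch (not part of this commit) of driving a
// length-delimited Decoder over an in-memory reader; it relies on the
// test-only `impl MemRead for &[u8]` found in the tests module below.
#[test]
fn test_decode_length_sketch() {
    let mut body = &b"hello world"[..];
    let mut decoder = Decoder::length(5);
    match decoder.decode(&mut body).expect("decode") {
        Async::Ready(bytes) => assert_eq!(bytes.as_ref(), b"hello"),
        Async::NotReady => unreachable!("an in-memory reader never blocks"),
    }
    assert!(decoder.is_eof()); // the 5 declared bytes have been consumed
}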
macro_rules! byte (
($rdr:ident) => ({
let buf = try_ready!($rdr.read_mem(1));
if !buf.is_empty() {
buf[0]
} else {
return Err(io::Error::new(io::ErrorKind::UnexpectedEof,
"Unexpected eof during chunk size line"));
}
})
);
impl ChunkedState {
fn step<R: MemRead>(&self,
body: &mut R,
size: &mut u64,
buf: &mut Option<Bytes>)
-> Poll<ChunkedState, io::Error> {
use self::ChunkedState::*;
match *self {
Size => ChunkedState::read_size(body, size),
SizeLws => ChunkedState::read_size_lws(body),
Extension => ChunkedState::read_extension(body),
SizeLf => ChunkedState::read_size_lf(body, size),
Body => ChunkedState::read_body(body, size, buf),
BodyCr => ChunkedState::read_body_cr(body),
BodyLf => ChunkedState::read_body_lf(body),
EndCr => ChunkedState::read_end_cr(body),
EndLf => ChunkedState::read_end_lf(body),
End => Ok(Async::Ready(ChunkedState::End)),
}
}
fn read_size<R: MemRead>(rdr: &mut R, size: &mut u64) -> Poll<ChunkedState, io::Error> {
trace!("Read chunk hex size");
let radix = 16;
match byte!(rdr) {
b @ b'0'...b'9' => {
*size *= radix;
*size += (b - b'0') as u64;
}
b @ b'a'...b'f' => {
*size *= radix;
*size += (b + 10 - b'a') as u64;
}
b @ b'A'...b'F' => {
*size *= radix;
*size += (b + 10 - b'A') as u64;
}
b'\t' | b' ' => return Ok(Async::Ready(ChunkedState::SizeLws)),
b';' => return Ok(Async::Ready(ChunkedState::Extension)),
b'\r' => return Ok(Async::Ready(ChunkedState::SizeLf)),
_ => {
return Err(io::Error::new(io::ErrorKind::InvalidInput,
"Invalid chunk size line: Invalid Size"));
}
}
Ok(Async::Ready(ChunkedState::Size))
}
fn read_size_lws<R: MemRead>(rdr: &mut R) -> Poll<ChunkedState, io::Error> {
trace!("read_size_lws");
match byte!(rdr) {
// LWS can follow the chunk size, but no more digits can come
b'\t' | b' ' => Ok(Async::Ready(ChunkedState::SizeLws)),
b';' => Ok(Async::Ready(ChunkedState::Extension)),
b'\r' => Ok(Async::Ready(ChunkedState::SizeLf)),
_ => {
Err(io::Error::new(io::ErrorKind::InvalidInput,
"Invalid chunk size linear white space"))
}
}
}
fn read_extension<R: MemRead>(rdr: &mut R) -> Poll<ChunkedState, io::Error> {
trace!("read_extension");
match byte!(rdr) {
b'\r' => Ok(Async::Ready(ChunkedState::SizeLf)),
_ => Ok(Async::Ready(ChunkedState::Extension)), // no supported extensions
}
}
fn read_size_lf<R: MemRead>(rdr: &mut R, size: &mut u64) -> Poll<ChunkedState, io::Error> {
trace!("Chunk size is {:?}", size);
match byte!(rdr) {
b'\n' if *size > 0 => Ok(Async::Ready(ChunkedState::Body)),
b'\n' if *size == 0 => Ok(Async::Ready(ChunkedState::EndCr)),
_ => Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid chunk size LF")),
}
}
fn read_body<R: MemRead>(rdr: &mut R,
rem: &mut u64,
buf: &mut Option<Bytes>)
-> Poll<ChunkedState, io::Error> {
trace!("Chunked read, remaining={:?}", rem);
// cap remaining bytes at the max capacity of usize
let rem_cap = match *rem {
r if r > usize::MAX as u64 => usize::MAX,
r => r as usize,
};
let to_read = rem_cap;
let slice = try_ready!(rdr.read_mem(to_read));
let count = slice.len();
if count == 0 {
*rem = 0;
return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "early eof"));
}
*buf = Some(slice);
*rem -= count as u64;
if *rem > 0 {
Ok(Async::Ready(ChunkedState::Body))
} else {
Ok(Async::Ready(ChunkedState::BodyCr))
}
}
fn read_body_cr<R: MemRead>(rdr: &mut R) -> Poll<ChunkedState, io::Error> {
match byte!(rdr) {
b'\r' => Ok(Async::Ready(ChunkedState::BodyLf)),
_ => Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid chunk body CR")),
}
}
fn read_body_lf<R: MemRead>(rdr: &mut R) -> Poll<ChunkedState, io::Error> {
match byte!(rdr) {
b'\n' => Ok(Async::Ready(ChunkedState::Size)),
_ => Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid chunk body LF")),
}
}
fn read_end_cr<R: MemRead>(rdr: &mut R) -> Poll<ChunkedState, io::Error> {
match byte!(rdr) {
b'\r' => Ok(Async::Ready(ChunkedState::EndLf)),
_ => Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid chunk end CR")),
}
}
fn read_end_lf<R: MemRead>(rdr: &mut R) -> Poll<ChunkedState, io::Error> {
match byte!(rdr) {
b'\n' => Ok(Async::Ready(ChunkedState::End)),
_ => Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid chunk end LF")),
}
}
}
#[cfg(test)]
mod tests {
use std::error::Error;
use std::io;
use std::io::Write;
use super::Decoder;
use super::ChunkedState;
use proto::io::MemRead;
use futures::{Async, Poll};
use bytes::{BytesMut, Bytes};
use mock::AsyncIo;
impl<'a> MemRead for &'a [u8] {
fn read_mem(&mut self, len: usize) -> Poll<Bytes, io::Error> {
let n = ::std::cmp::min(len, self.len());
if n > 0 {
let (a, b) = self.split_at(n);
let mut buf = BytesMut::from(a);
*self = b;
Ok(Async::Ready(buf.split_to(n).freeze()))
} else {
Ok(Async::Ready(Bytes::new()))
}
}
}
trait HelpUnwrap<T> {
fn unwrap(self) -> T;
}
impl HelpUnwrap<Bytes> for Async<Bytes> {
fn unwrap(self) -> Bytes {
match self {
Async::Ready(bytes) => bytes,
Async::NotReady => panic!(),
}
}
}
impl HelpUnwrap<ChunkedState> for Async<ChunkedState> {
fn unwrap(self) -> ChunkedState {
match self {
Async::Ready(state) => state,
Async::NotReady => panic!(),
}
}
}
#[test]
fn test_read_chunk_size() {
use std::io::ErrorKind::{UnexpectedEof, InvalidInput};
fn read(s: &str) -> u64 {
let mut state = ChunkedState::Size;
let rdr = &mut s.as_bytes();
let mut size = 0;
loop {
let result = state.step(rdr, &mut size, &mut None);
let desc = format!("read_size failed for {:?}", s);
state = result.expect(desc.as_str()).unwrap();
if state == ChunkedState::Body || state == ChunkedState::EndCr {
break;
}
}
size
}
fn read_err(s: &str, expected_err: io::ErrorKind) {
let mut state = ChunkedState::Size;
let rdr = &mut s.as_bytes();
let mut size = 0;
loop {
let result = state.step(rdr, &mut size, &mut None);
state = match result {
Ok(s) => s.unwrap(),
Err(e) => {
assert!(expected_err == e.kind(), "Reading {:?}, expected {:?}, but got {:?}",
s, expected_err, e.kind());
return;
}
};
if state == ChunkedState::Body || state == ChunkedState::End {
panic!(format!("Was Ok. Expected Err for {:?}", s));
}
}
}
assert_eq!(1, read("1\r\n"));
assert_eq!(1, read("01\r\n"));
assert_eq!(0, read("0\r\n"));
assert_eq!(0, read("00\r\n"));
assert_eq!(10, read("A\r\n"));
assert_eq!(10, read("a\r\n"));
assert_eq!(255, read("Ff\r\n"));
assert_eq!(255, read("Ff \r\n"));
// Missing LF or CRLF
read_err("F\rF", InvalidInput);
read_err("F", UnexpectedEof);
// Invalid hex digit
read_err("X\r\n", InvalidInput);
read_err("1X\r\n", InvalidInput);
read_err("-\r\n", InvalidInput);
read_err("-1\r\n", InvalidInput);
// Acceptable (if not fully valid) extensions do not influence the size
assert_eq!(1, read("1;extension\r\n"));
assert_eq!(10, read("a;ext name=value\r\n"));
assert_eq!(1, read("1;extension;extension2\r\n"));
assert_eq!(1, read("1;;; ;\r\n"));
assert_eq!(2, read("2; extension...\r\n"));
assert_eq!(3, read("3 ; extension=123\r\n"));
assert_eq!(3, read("3 ;\r\n"));
assert_eq!(3, read("3 ; \r\n"));
// Invalid extensions cause an error
read_err("1 invalid extension\r\n", InvalidInput);
read_err("1 A\r\n", InvalidInput);
read_err("1;no CRLF", UnexpectedEof);
}
#[test]
fn test_read_sized_early_eof() {
let mut bytes = &b"foo bar"[..];
let mut decoder = Decoder::length(10);
assert_eq!(decoder.decode(&mut bytes).unwrap().unwrap().len(), 7);
let e = decoder.decode(&mut bytes).unwrap_err();
assert_eq!(e.kind(), io::ErrorKind::Other);
assert_eq!(e.description(), "early eof");
}
#[test]
fn test_read_chunked_early_eof() {
let mut bytes = &b"\
9\r\n\
foo bar\
"[..];
let mut decoder = Decoder::chunked();
assert_eq!(decoder.decode(&mut bytes).unwrap().unwrap().len(), 7);
let e = decoder.decode(&mut bytes).unwrap_err();
assert_eq!(e.kind(), io::ErrorKind::UnexpectedEof);
assert_eq!(e.description(), "early eof");
}
#[test]
fn test_read_chunked_single_read() {
let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\n"[..];
let buf = Decoder::chunked().decode(&mut mock_buf).expect("decode").unwrap();
assert_eq!(16, buf.len());
let result = String::from_utf8(buf.as_ref().to_vec()).expect("decode String");
assert_eq!("1234567890abcdef", &result);
}
#[test]
fn test_read_chunked_after_eof() {
let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\n\r\n"[..];
let mut decoder = Decoder::chunked();
// normal read
let buf = decoder.decode(&mut mock_buf).expect("decode").unwrap();
assert_eq!(16, buf.len());
let result = String::from_utf8(buf.as_ref().to_vec()).expect("decode String");
assert_eq!("1234567890abcdef", &result);
// eof read
let buf = decoder.decode(&mut mock_buf).expect("decode").unwrap();
assert_eq!(0, buf.len());
// ensure read after eof also returns eof
let buf = decoder.decode(&mut mock_buf).expect("decode").unwrap();
assert_eq!(0, buf.len());
}
// perform an async read using a custom buffer size and causing a blocking
// read at the specified byte
fn read_async(mut decoder: Decoder,
content: &[u8],
block_at: usize)
-> String {
let content_len = content.len();
let mut ins = AsyncIo::new(content, block_at);
let mut outs = Vec::new();
loop {
match decoder.decode(&mut ins).expect("unexpected decode error: {}") {
Async::Ready(buf) => {
if buf.is_empty() {
break; // eof
}
outs.write(buf.as_ref()).expect("write buffer");
},
Async::NotReady => {
ins.block_in(content_len); // we only block once
}
};
}
String::from_utf8(outs).expect("decode String")
}
// iterate over the different ways that this async read could go.
// tests blocking a read at each byte along the content - The shotgun approach
fn all_async_cases(content: &str, expected: &str, decoder: Decoder) {
let content_len = content.len();
for block_at in 0..content_len {
let actual = read_async(decoder.clone(), content.as_bytes(), block_at);
assert_eq!(expected, &actual, "Failed async. Blocking at {}", block_at);
}
}
#[test]
fn test_read_length_async() {
let content = "foobar";
all_async_cases(content, content, Decoder::length(content.len() as u64));
}
#[test]
fn test_read_chunked_async() {
let content = "3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n";
let expected = "foobar";
all_async_cases(content, expected, Decoder::chunked());
}
#[test]
fn test_read_eof_async() {
let content = "foobar";
all_async_cases(content, content, Decoder::eof());
}
}

309
src/proto/h1/encode.rs Normal file

@@ -0,0 +1,309 @@
use std::cmp;
use std::io::{self, Write};
use proto::io::AtomicWrite;
/// Encoders to handle different Transfer-Encodings.
#[derive(Debug, Clone)]
pub struct Encoder {
kind: Kind,
}
#[derive(Debug, PartialEq, Clone)]
enum Kind {
/// An Encoder for when Transfer-Encoding includes `chunked`.
Chunked(Chunked),
/// An Encoder for when Content-Length is set.
///
/// Enforces that the body is not longer than the Content-Length header.
Length(u64),
}
impl Encoder {
pub fn chunked() -> Encoder {
Encoder {
kind: Kind::Chunked(Chunked::Init),
}
}
pub fn length(len: u64) -> Encoder {
Encoder {
kind: Kind::Length(len),
}
}
pub fn is_eof(&self) -> bool {
match self.kind {
Kind::Length(0) |
Kind::Chunked(Chunked::End) => true,
_ => false
}
}
pub fn eof(&self) -> Result<Option<&'static [u8]>, NotEof> {
match self.kind {
Kind::Length(0) => Ok(None),
Kind::Chunked(Chunked::Init) => Ok(Some(b"0\r\n\r\n")),
_ => Err(NotEof),
}
}
pub fn encode<W: AtomicWrite>(&mut self, w: &mut W, msg: &[u8]) -> io::Result<usize> {
match self.kind {
Kind::Chunked(ref mut chunked) => {
chunked.encode(w, msg)
},
Kind::Length(ref mut remaining) => {
if msg.is_empty() {
return Ok(0);
}
let n = {
let max = cmp::min(*remaining as usize, msg.len());
trace!("sized write = {}", max);
let slice = &msg[..max];
try!(w.write_atomic(&[slice]))
};
if n == 0 {
return Err(io::Error::new(io::ErrorKind::WriteZero, "write zero"));
}
*remaining -= n as u64;
trace!("encoded {} bytes, remaining = {}", n, remaining);
Ok(n)
},
}
}
}
#[derive(Debug)]
pub struct NotEof;
#[derive(Debug, PartialEq, Clone)]
enum Chunked {
Init,
Size(ChunkSize),
SizeCr,
SizeLf,
Body(usize),
BodyCr,
BodyLf,
End,
}
impl Chunked {
fn encode<W: AtomicWrite>(&mut self, w: &mut W, msg: &[u8]) -> io::Result<usize> {
match *self {
Chunked::Init => {
let mut size = ChunkSize {
bytes: [0; CHUNK_SIZE_MAX_BYTES],
pos: 0,
len: 0,
};
trace!("chunked write, size = {:?}", msg.len());
write!(&mut size, "{:X}", msg.len())
.expect("CHUNK_SIZE_MAX_BYTES should fit any usize");
*self = Chunked::Size(size);
}
Chunked::End => return Ok(0),
_ => {}
}
let mut n = {
let pieces = match *self {
Chunked::Init => unreachable!("Chunked::Init should have become Chunked::Size"),
Chunked::Size(ref size) => [
&size.bytes[size.pos.into() .. size.len.into()],
&b"\r\n"[..],
msg,
&b"\r\n"[..],
],
Chunked::SizeCr => [
&b""[..],
&b"\r\n"[..],
msg,
&b"\r\n"[..],
],
Chunked::SizeLf => [
&b""[..],
&b"\n"[..],
msg,
&b"\r\n"[..],
],
Chunked::Body(pos) => [
&b""[..],
&b""[..],
&msg[pos..],
&b"\r\n"[..],
],
Chunked::BodyCr => [
&b""[..],
&b""[..],
&b""[..],
&b"\r\n"[..],
],
Chunked::BodyLf => [
&b""[..],
&b""[..],
&b""[..],
&b"\n"[..],
],
Chunked::End => unreachable!("Chunked::End shouldn't write more")
};
try!(w.write_atomic(&pieces))
};
while n > 0 {
match *self {
Chunked::Init => unreachable!("Chunked::Init should have become Chunked::Size"),
Chunked::Size(mut size) => {
n = size.update(n);
if size.len == 0 {
*self = Chunked::SizeCr;
} else {
*self = Chunked::Size(size);
}
},
Chunked::SizeCr => {
*self = Chunked::SizeLf;
n -= 1;
}
Chunked::SizeLf => {
*self = Chunked::Body(0);
n -= 1;
}
Chunked::Body(pos) => {
let left = msg.len() - pos;
if n >= left {
*self = Chunked::BodyCr;
n -= left;
} else {
*self = Chunked::Body(pos + n);
n = 0;
}
}
Chunked::BodyCr => {
*self = Chunked::BodyLf;
n -= 1;
}
Chunked::BodyLf => {
assert!(n == 1);
*self = if msg.len() == 0 {
Chunked::End
} else {
Chunked::Init
};
n = 0;
},
Chunked::End => unreachable!("Chunked::End shouldn't have any to write")
}
}
match *self {
Chunked::Init |
Chunked::End => Ok(msg.len()),
_ => Err(io::ErrorKind::WouldBlock.into())
}
}
}
#[cfg(target_pointer_width = "32")]
const USIZE_BYTES: usize = 4;
#[cfg(target_pointer_width = "64")]
const USIZE_BYTES: usize = 8;
// each byte of the usize will become 2 hex digits
const CHUNK_SIZE_MAX_BYTES: usize = USIZE_BYTES * 2;
#[derive(Clone, Copy)]
struct ChunkSize {
bytes: [u8; CHUNK_SIZE_MAX_BYTES],
pos: u8,
len: u8,
}
impl ChunkSize {
fn update(&mut self, n: usize) -> usize {
let diff = (self.len - self.pos).into();
if n >= diff {
self.pos = 0;
self.len = 0;
n - diff
} else {
self.pos += n as u8; // just verified it was a small usize
0
}
}
}
impl ::std::fmt::Debug for ChunkSize {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
f.debug_struct("ChunkSize")
.field("bytes", &&self.bytes[..self.len.into()])
.field("pos", &self.pos)
.finish()
}
}
impl ::std::cmp::PartialEq for ChunkSize {
fn eq(&self, other: &ChunkSize) -> bool {
self.len == other.len &&
self.pos == other.pos &&
(&self.bytes[..]) == (&other.bytes[..])
}
}
impl io::Write for ChunkSize {
fn write(&mut self, msg: &[u8]) -> io::Result<usize> {
let n = (&mut self.bytes[self.len.into() ..]).write(msg)
.expect("&mut [u8].write() cannot error");
self.len += n as u8; // safe because bytes is never bigger than 256
Ok(n)
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::Encoder;
use mock::{AsyncIo, Buf};
#[test]
fn test_chunked_encode_sync() {
let mut dst = Buf::new();
let mut encoder = Encoder::chunked();
encoder.encode(&mut dst, b"foo bar").unwrap();
encoder.encode(&mut dst, b"baz quux herp").unwrap();
encoder.encode(&mut dst, b"").unwrap();
assert_eq!(&dst[..], &b"7\r\nfoo bar\r\nD\r\nbaz quux herp\r\n0\r\n\r\n"[..]);
}
#[test]
fn test_chunked_encode_async() {
let mut dst = AsyncIo::new(Buf::new(), 7);
let mut encoder = Encoder::chunked();
assert!(encoder.encode(&mut dst, b"foo bar").is_err());
dst.block_in(6);
assert_eq!(7, encoder.encode(&mut dst, b"foo bar").unwrap());
dst.block_in(30);
assert_eq!(13, encoder.encode(&mut dst, b"baz quux herp").unwrap());
encoder.encode(&mut dst, b"").unwrap();
assert_eq!(&dst[..], &b"7\r\nfoo bar\r\nD\r\nbaz quux herp\r\n0\r\n\r\n"[..]);
}
#[test]
fn test_sized_encode() {
let mut dst = Buf::new();
let mut encoder = Encoder::length(8);
encoder.encode(&mut dst, b"foo bar").unwrap();
assert_eq!(encoder.encode(&mut dst, b"baz").unwrap(), 1);
assert_eq!(dst, b"foo barb");
}
}

8
src/proto/h1/mod.rs Normal file

@@ -0,0 +1,8 @@
pub use self::decode::Decoder;
pub use self::encode::Encoder;
mod date;
mod decode;
mod encode;
pub mod parse;

585
src/proto/h1/parse.rs Normal file

@@ -0,0 +1,585 @@
use std::borrow::Cow;
use std::fmt::{self, Write};
use httparse;
use bytes::{BytesMut, Bytes};
use header::{self, Headers, ContentLength, TransferEncoding};
use proto::{MessageHead, RawStatus, Http1Transaction, ParseResult,
ServerTransaction, ClientTransaction, RequestLine, RequestHead};
use proto::h1::{Encoder, Decoder, date};
use method::Method;
use status::StatusCode;
use version::HttpVersion::{Http10, Http11};
const MAX_HEADERS: usize = 100;
const AVERAGE_HEADER_SIZE: usize = 30; // totally scientific
impl Http1Transaction for ServerTransaction {
type Incoming = RequestLine;
type Outgoing = StatusCode;
fn parse(buf: &mut BytesMut) -> ParseResult<RequestLine> {
if buf.len() == 0 {
return Ok(None);
}
let mut headers_indices = [HeaderIndices {
name: (0, 0),
value: (0, 0)
}; MAX_HEADERS];
let (len, method, path, version, headers_len) = {
let mut headers = [httparse::EMPTY_HEADER; MAX_HEADERS];
trace!("Request.parse([Header; {}], [u8; {}])", headers.len(), buf.len());
let mut req = httparse::Request::new(&mut headers);
match try!(req.parse(&buf)) {
httparse::Status::Complete(len) => {
trace!("Request.parse Complete({})", len);
let method = try!(req.method.unwrap().parse());
let path = req.path.unwrap();
let bytes_ptr = buf.as_ref().as_ptr() as usize;
let path_start = path.as_ptr() as usize - bytes_ptr;
let path_end = path_start + path.len();
let path = (path_start, path_end);
let version = if req.version.unwrap() == 1 { Http11 } else { Http10 };
record_header_indices(buf.as_ref(), &req.headers, &mut headers_indices);
let headers_len = req.headers.len();
(len, method, path, version, headers_len)
}
httparse::Status::Partial => return Ok(None),
}
};
let mut headers = Headers::with_capacity(headers_len);
let slice = buf.split_to(len).freeze();
let path = slice.slice(path.0, path.1);
// path was found to be utf8 by httparse
let path = try!(unsafe { ::uri::from_utf8_unchecked(path) });
let subject = RequestLine(
method,
path,
);
headers.extend(HeadersAsBytesIter {
headers: headers_indices[..headers_len].iter(),
slice: slice,
});
Ok(Some((MessageHead {
version: version,
subject: subject,
headers: headers,
}, len)))
}
fn decoder(head: &MessageHead<Self::Incoming>, method: &mut Option<Method>) -> ::Result<Decoder> {
use ::header;
*method = Some(head.subject.0.clone());
// According to https://tools.ietf.org/html/rfc7230#section-3.3.3
// 1. (irrelevant to Request)
// 2. (irrelevant to Request)
// 3. Transfer-Encoding: chunked has a chunked body.
// 4. If multiple differing Content-Length headers or invalid, close connection.
// 5. Content-Length header has a sized body.
// 6. Length 0.
// 7. (irrelevant to Request)
if let Some(&header::TransferEncoding(ref encodings)) = head.headers.get() {
// https://tools.ietf.org/html/rfc7230#section-3.3.3
// If a Transfer-Encoding header is present, and 'chunked' is
// not the final encoding, and this is a Request, then it is
// malformed. A server should respond with 400 Bad Request.
if encodings.last() == Some(&header::Encoding::Chunked) {
Ok(Decoder::chunked())
} else {
debug!("request with transfer-encoding header, but not chunked, bad request");
Err(::Error::Header)
}
} else if let Some(&header::ContentLength(len)) = head.headers.get() {
Ok(Decoder::length(len))
} else if head.headers.has::<header::ContentLength>() {
debug!("illegal Content-Length: {:?}", head.headers.get_raw("Content-Length"));
Err(::Error::Header)
} else {
Ok(Decoder::length(0))
}
}
fn encode(mut head: MessageHead<Self::Outgoing>, has_body: bool, method: &mut Option<Method>, dst: &mut Vec<u8>) -> Encoder {
trace!("ServerTransaction::encode has_body={}, method={:?}", has_body, method);
let body = ServerTransaction::set_length(&mut head, has_body, method.as_ref());
let init_cap = 30 + head.headers.len() * AVERAGE_HEADER_SIZE;
dst.reserve(init_cap);
if head.version == ::HttpVersion::Http11 && head.subject == ::StatusCode::Ok {
extend(dst, b"HTTP/1.1 200 OK\r\n");
let _ = write!(FastWrite(dst), "{}", head.headers);
} else {
let _ = write!(FastWrite(dst), "{} {}\r\n{}", head.version, head.subject, head.headers);
}
// using proto::h1::date is quite a lot faster than generating a unique
// Date header each time; req/s goes up by about 10%
if !head.headers.has::<header::Date>() {
dst.reserve(date::DATE_VALUE_LENGTH + 8);
extend(dst, b"Date: ");
date::extend(dst);
extend(dst, b"\r\n");
}
extend(dst, b"\r\n");
body
}
}
impl ServerTransaction {
fn set_length(head: &mut MessageHead<StatusCode>, has_body: bool, method: Option<&Method>) -> Encoder {
// these are here thanks to borrowck
// `if method == Some(&Method::Get)` says the RHS doesn't live long enough
const HEAD: Option<&'static Method> = Some(&Method::Head);
const CONNECT: Option<&'static Method> = Some(&Method::Connect);
let can_have_body = {
if method == HEAD {
false
} else if method == CONNECT && head.subject.is_success() {
false
} else {
match head.subject {
// TODO: support for 1xx codes needs improvement everywhere
// would be 100...199 => false
StatusCode::NoContent |
StatusCode::NotModified => false,
_ => true,
}
}
};
if has_body && can_have_body {
set_length(&mut head.headers)
} else {
head.headers.remove::<TransferEncoding>();
if can_have_body {
head.headers.set(ContentLength(0));
}
Encoder::length(0)
}
}
}
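// Editor's note: an illustrative sketch (not part of this commit) of the
// RFC 7230 rules spelled out in `ServerTransaction::decoder` above: a chunked
// Transfer-Encoding wins, then Content-Length, otherwise the request body is
// treated as empty.
#[test]
fn test_server_decoder_rules_sketch() {
    let mut head: RequestHead = MessageHead::default();
    let mut method = None;

    head.headers.set(TransferEncoding(vec![header::Encoding::Chunked]));
    assert_eq!(ServerTransaction::decoder(&head, &mut method).unwrap(), Decoder::chunked());

    head.headers.remove::<TransferEncoding>();
    head.headers.set(ContentLength(42));
    assert_eq!(ServerTransaction::decoder(&head, &mut method).unwrap(), Decoder::length(42));

    head.headers.remove::<ContentLength>();
    assert_eq!(ServerTransaction::decoder(&head, &mut method).unwrap(), Decoder::length(0));
}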
impl Http1Transaction for ClientTransaction {
type Incoming = RawStatus;
type Outgoing = RequestLine;
fn parse(buf: &mut BytesMut) -> ParseResult<RawStatus> {
if buf.len() == 0 {
return Ok(None);
}
let mut headers_indices = [HeaderIndices {
name: (0, 0),
value: (0, 0)
}; MAX_HEADERS];
let (len, code, reason, version, headers_len) = {
let mut headers = [httparse::EMPTY_HEADER; MAX_HEADERS];
trace!("Response.parse([Header; {}], [u8; {}])", headers.len(), buf.len());
let mut res = httparse::Response::new(&mut headers);
let bytes = buf.as_ref();
match try!(res.parse(bytes)) {
httparse::Status::Complete(len) => {
trace!("Response.parse Complete({})", len);
let code = res.code.unwrap();
let status = try!(StatusCode::try_from(code).map_err(|_| ::Error::Status));
let reason = match status.canonical_reason() {
Some(reason) if reason == res.reason.unwrap() => Cow::Borrowed(reason),
_ => Cow::Owned(res.reason.unwrap().to_owned())
};
let version = if res.version.unwrap() == 1 { Http11 } else { Http10 };
record_header_indices(bytes, &res.headers, &mut headers_indices);
let headers_len = res.headers.len();
(len, code, reason, version, headers_len)
},
httparse::Status::Partial => return Ok(None),
}
};
let mut headers = Headers::with_capacity(headers_len);
let slice = buf.split_to(len).freeze();
headers.extend(HeadersAsBytesIter {
headers: headers_indices[..headers_len].iter(),
slice: slice,
});
Ok(Some((MessageHead {
version: version,
subject: RawStatus(code, reason),
headers: headers,
}, len)))
}
fn decoder(inc: &MessageHead<Self::Incoming>, method: &mut Option<Method>) -> ::Result<Decoder> {
// According to https://tools.ietf.org/html/rfc7230#section-3.3.3
// 1. HEAD responses, and Status 1xx, 204, and 304 cannot have a body.
// 2. Status 2xx to a CONNECT cannot have a body.
// 3. Transfer-Encoding: chunked has a chunked body.
// 4. If multiple differing Content-Length headers or invalid, close connection.
// 5. Content-Length header has a sized body.
// 6. (irrelevant to Response)
// 7. Read till EOF.
match *method {
Some(Method::Head) => {
return Ok(Decoder::length(0));
}
Some(Method::Connect) => match inc.subject.0 {
200...299 => {
return Ok(Decoder::length(0));
},
_ => {},
},
Some(_) => {},
None => {
trace!("ClientTransaction::decoder is missing the Method");
}
}
match inc.subject.0 {
100...199 |
204 |
304 => return Ok(Decoder::length(0)),
_ => (),
}
if let Some(&header::TransferEncoding(ref codings)) = inc.headers.get() {
if codings.last() == Some(&header::Encoding::Chunked) {
Ok(Decoder::chunked())
} else {
trace!("not chunked. read till eof");
Ok(Decoder::eof())
}
} else if let Some(&header::ContentLength(len)) = inc.headers.get() {
Ok(Decoder::length(len))
} else if inc.headers.has::<header::ContentLength>() {
debug!("illegal Content-Length: {:?}", inc.headers.get_raw("Content-Length"));
Err(::Error::Header)
} else {
trace!("neither Transfer-Encoding nor Content-Length");
Ok(Decoder::eof())
}
}
fn encode(mut head: MessageHead<Self::Outgoing>, has_body: bool, method: &mut Option<Method>, dst: &mut Vec<u8>) -> Encoder {
trace!("ClientTransaction::encode has_body={}, method={:?}", has_body, method);
*method = Some(head.subject.0.clone());
let body = ClientTransaction::set_length(&mut head, has_body);
let init_cap = 30 + head.headers.len() * AVERAGE_HEADER_SIZE;
dst.reserve(init_cap);
let _ = write!(FastWrite(dst), "{} {}\r\n{}\r\n", head.subject, head.version, head.headers);
body
}
}
impl ClientTransaction {
fn set_length(head: &mut RequestHead, has_body: bool) -> Encoder {
if has_body {
set_length(&mut head.headers)
} else {
head.headers.remove::<ContentLength>();
head.headers.remove::<TransferEncoding>();
Encoder::length(0)
}
}
}
fn set_length(headers: &mut Headers) -> Encoder {
let len = headers.get::<header::ContentLength>().map(|n| **n);
if let Some(len) = len {
Encoder::length(len)
} else {
let encodings = match headers.get_mut::<header::TransferEncoding>() {
Some(&mut header::TransferEncoding(ref mut encodings)) => {
if encodings.last() != Some(&header::Encoding::Chunked) {
encodings.push(header::Encoding::Chunked);
}
false
},
None => true
};
if encodings {
headers.set(header::TransferEncoding(vec![header::Encoding::Chunked]));
}
Encoder::chunked()
}
}
#[derive(Clone, Copy)]
struct HeaderIndices {
name: (usize, usize),
value: (usize, usize),
}
fn record_header_indices(bytes: &[u8], headers: &[httparse::Header], indices: &mut [HeaderIndices]) {
let bytes_ptr = bytes.as_ptr() as usize;
for (header, indices) in headers.iter().zip(indices.iter_mut()) {
let name_start = header.name.as_ptr() as usize - bytes_ptr;
let name_end = name_start + header.name.len();
indices.name = (name_start, name_end);
let value_start = header.value.as_ptr() as usize - bytes_ptr;
let value_end = value_start + header.value.len();
indices.value = (value_start, value_end);
}
}
struct HeadersAsBytesIter<'a> {
headers: ::std::slice::Iter<'a, HeaderIndices>,
slice: Bytes,
}
impl<'a> Iterator for HeadersAsBytesIter<'a> {
type Item = (&'a str, Bytes);
fn next(&mut self) -> Option<Self::Item> {
self.headers.next().map(|header| {
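// The indices were recorded against this exact buffer in `record_header_indices`,
// and httparse only accepts token (ASCII) bytes in header names, so the slice is
// in bounds and valid UTF-8.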
let name = unsafe {
let bytes = ::std::slice::from_raw_parts(
self.slice.as_ref().as_ptr().offset(header.name.0 as isize),
header.name.1 - header.name.0
);
::std::str::from_utf8_unchecked(bytes)
};
(name, self.slice.slice(header.value.0, header.value.1))
})
}
}
struct FastWrite<'a>(&'a mut Vec<u8>);
impl<'a> fmt::Write for FastWrite<'a> {
#[inline]
fn write_str(&mut self, s: &str) -> fmt::Result {
extend(self.0, s.as_bytes());
Ok(())
}
#[inline]
fn write_fmt(&mut self, args: fmt::Arguments) -> fmt::Result {
fmt::write(self, args)
}
}
#[inline]
fn extend(dst: &mut Vec<u8>, data: &[u8]) {
dst.extend_from_slice(data);
}
#[cfg(test)]
mod tests {
use bytes::BytesMut;
use proto::{MessageHead, ServerTransaction, ClientTransaction, Http1Transaction};
use header::{ContentLength, TransferEncoding};
#[test]
fn test_parse_request() {
extern crate pretty_env_logger;
let _ = pretty_env_logger::init();
let mut raw = BytesMut::from(b"GET /echo HTTP/1.1\r\nHost: hyper.rs\r\n\r\n".to_vec());
let expected_len = raw.len();
let (req, len) = ServerTransaction::parse(&mut raw).unwrap().unwrap();
assert_eq!(len, expected_len);
assert_eq!(req.subject.0, ::Method::Get);
assert_eq!(req.subject.1, "/echo");
assert_eq!(req.version, ::HttpVersion::Http11);
assert_eq!(req.headers.len(), 1);
assert_eq!(req.headers.get_raw("Host").map(|raw| &raw[0]), Some(b"hyper.rs".as_ref()));
}
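#[test]
fn test_parse_request_partial() {
// An illustrative sketch (assumes only the parse contract shown above): an
// incomplete head yields Ok(None) and leaves the bytes buffered for a later read.
let mut raw = BytesMut::from(b"GET /echo HTTP/1.1\r\nHost: hy".to_vec());
assert!(ServerTransaction::parse(&mut raw).unwrap().is_none());
assert_eq!(raw.len(), 28);
}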
#[test]
fn test_parse_response() {
extern crate pretty_env_logger;
let _ = pretty_env_logger::init();
let mut raw = BytesMut::from(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n".to_vec());
let expected_len = raw.len();
let (req, len) = ClientTransaction::parse(&mut raw).unwrap().unwrap();
assert_eq!(len, expected_len);
assert_eq!(req.subject.0, 200);
assert_eq!(req.subject.1, "OK");
assert_eq!(req.version, ::HttpVersion::Http11);
assert_eq!(req.headers.len(), 1);
assert_eq!(req.headers.get_raw("Content-Length").map(|raw| &raw[0]), Some(b"0".as_ref()));
}
#[test]
fn test_parse_request_errors() {
let mut raw = BytesMut::from(b"GET htt:p// HTTP/1.1\r\nHost: hyper.rs\r\n\r\n".to_vec());
ServerTransaction::parse(&mut raw).unwrap_err();
}
#[test]
fn test_parse_raw_status() {
let mut raw = BytesMut::from(b"HTTP/1.1 200 OK\r\n\r\n".to_vec());
let (res, _) = ClientTransaction::parse(&mut raw).unwrap().unwrap();
assert_eq!(res.subject.1, "OK");
let mut raw = BytesMut::from(b"HTTP/1.1 200 Howdy\r\n\r\n".to_vec());
let (res, _) = ClientTransaction::parse(&mut raw).unwrap().unwrap();
assert_eq!(res.subject.1, "Howdy");
}
#[test]
fn test_decoder_request() {
use super::Decoder;
let method = &mut None;
let mut head = MessageHead::<::proto::RequestLine>::default();
head.subject.0 = ::Method::Get;
assert_eq!(Decoder::length(0), ServerTransaction::decoder(&head, method).unwrap());
assert_eq!(*method, Some(::Method::Get));
head.subject.0 = ::Method::Post;
assert_eq!(Decoder::length(0), ServerTransaction::decoder(&head, method).unwrap());
assert_eq!(*method, Some(::Method::Post));
head.headers.set(TransferEncoding::chunked());
assert_eq!(Decoder::chunked(), ServerTransaction::decoder(&head, method).unwrap());
// when both Transfer-Encoding and Content-Length are present, chunked wins
head.headers.set(ContentLength(10));
assert_eq!(Decoder::chunked(), ServerTransaction::decoder(&head, method).unwrap());
head.headers.remove::<TransferEncoding>();
assert_eq!(Decoder::length(10), ServerTransaction::decoder(&head, method).unwrap());
head.headers.set_raw("Content-Length", vec![b"5".to_vec(), b"5".to_vec()]);
assert_eq!(Decoder::length(5), ServerTransaction::decoder(&head, method).unwrap());
head.headers.set_raw("Content-Length", vec![b"10".to_vec(), b"11".to_vec()]);
ServerTransaction::decoder(&head, method).unwrap_err();
head.headers.remove::<ContentLength>();
head.headers.set_raw("Transfer-Encoding", "gzip");
ServerTransaction::decoder(&head, method).unwrap_err();
}
#[test]
fn test_decoder_response() {
use super::Decoder;
let method = &mut Some(::Method::Get);
let mut head = MessageHead::<::proto::RawStatus>::default();
head.subject.0 = 204;
assert_eq!(Decoder::length(0), ClientTransaction::decoder(&head, method).unwrap());
head.subject.0 = 304;
assert_eq!(Decoder::length(0), ClientTransaction::decoder(&head, method).unwrap());
head.subject.0 = 200;
assert_eq!(Decoder::eof(), ClientTransaction::decoder(&head, method).unwrap());
*method = Some(::Method::Head);
assert_eq!(Decoder::length(0), ClientTransaction::decoder(&head, method).unwrap());
*method = Some(::Method::Connect);
assert_eq!(Decoder::length(0), ClientTransaction::decoder(&head, method).unwrap());
// a CONNECT response with a non-successful status can have a body
head.subject.0 = 404;
head.headers.set(ContentLength(10));
assert_eq!(Decoder::length(10), ClientTransaction::decoder(&head, method).unwrap());
head.headers.remove::<ContentLength>();
*method = Some(::Method::Get);
head.headers.set(TransferEncoding::chunked());
assert_eq!(Decoder::chunked(), ClientTransaction::decoder(&head, method).unwrap());
// when both Transfer-Encoding and Content-Length are present, chunked wins
head.headers.set(ContentLength(10));
assert_eq!(Decoder::chunked(), ClientTransaction::decoder(&head, method).unwrap());
head.headers.remove::<TransferEncoding>();
assert_eq!(Decoder::length(10), ClientTransaction::decoder(&head, method).unwrap());
head.headers.set_raw("Content-Length", vec![b"5".to_vec(), b"5".to_vec()]);
assert_eq!(Decoder::length(5), ClientTransaction::decoder(&head, method).unwrap());
head.headers.set_raw("Content-Length", vec![b"10".to_vec(), b"11".to_vec()]);
ClientTransaction::decoder(&head, method).unwrap_err();
}
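#[test]
fn test_encode_chunked_when_no_length() {
// An illustrative sketch (assumes only the encode/set_length code shown above):
// with a body but no Content-Length, the server side falls back to chunked
// transfer coding and advertises it in the serialized head.
let head = MessageHead::<::StatusCode>::default();
let mut vec = Vec::new();
let _ = ServerTransaction::encode(head, true, &mut None, &mut vec);
let s = ::std::str::from_utf8(&vec).unwrap();
assert!(s.starts_with("HTTP/1.1 200 OK\r\n"));
assert!(s.contains("Transfer-Encoding: chunked"));
assert!(!s.contains("Content-Length"));
}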
#[cfg(feature = "nightly")]
use test::Bencher;
#[cfg(feature = "nightly")]
#[bench]
fn bench_parse_incoming(b: &mut Bencher) {
let mut raw = BytesMut::from(
b"GET /super_long_uri/and_whatever?what_should_we_talk_about/\
I_wonder/Hard_to_write_in_an_uri_after_all/you_have_to_make\
_up_the_punctuation_yourself/how_fun_is_that?test=foo&test1=\
foo1&test2=foo2&test3=foo3&test4=foo4 HTTP/1.1\r\nHost: \
hyper.rs\r\nAccept: a lot of things\r\nAccept-Charset: \
utf8\r\nAccept-Encoding: *\r\nAccess-Control-Allow-\
Credentials: None\r\nAccess-Control-Allow-Origin: None\r\n\
Access-Control-Allow-Methods: None\r\nAccess-Control-Allow-\
Headers: None\r\nContent-Encoding: utf8\r\nContent-Security-\
Policy: None\r\nContent-Type: text/html\r\nOrigin: hyper\
\r\nSec-Websocket-Extensions: It looks super important!\r\n\
Sec-Websocket-Origin: hyper\r\nSec-Websocket-Version: 4.3\r\
\nStrict-Transport-Security: None\r\nUser-Agent: hyper\r\n\
X-Content-Duration: None\r\nX-Content-Security-Policy: None\
\r\nX-DNSPrefetch-Control: None\r\nX-Frame-Options: \
Something important obviously\r\nX-Requested-With: Nothing\
\r\n\r\n".to_vec()
);
let len = raw.len();
b.bytes = len as u64;
b.iter(|| {
ServerTransaction::parse(&mut raw).unwrap();
restart(&mut raw, len);
});
fn restart(b: &mut BytesMut, len: usize) {
b.reserve(1);
unsafe {
b.set_len(len);
}
}
}
#[cfg(feature = "nightly")]
#[bench]
fn bench_server_transaction_encode(b: &mut Bencher) {
use header::{Headers, ContentLength, ContentType};
use ::{StatusCode, HttpVersion};
let len = 108;
b.bytes = len as u64;
let mut head = MessageHead {
subject: StatusCode::Ok,
headers: Headers::new(),
version: HttpVersion::Http11,
};
head.headers.set(ContentLength(10));
head.headers.set(ContentType::json());
b.iter(|| {
let mut vec = Vec::new();
ServerTransaction::encode(head.clone(), true, &mut None, &mut vec);
assert_eq!(vec.len(), len);
::test::black_box(vec);
})
}
}

1
src/proto/h2/mod.rs Normal file
View File

@@ -0,0 +1 @@

386
src/proto/io.rs Normal file
View File

@@ -0,0 +1,386 @@
use std::cmp;
use std::fmt;
use std::io::{self, Write};
use std::ptr;
use futures::{Async, Poll};
use tokio_io::{AsyncRead, AsyncWrite};
use super::{Http1Transaction, MessageHead};
use bytes::{BytesMut, Bytes};
const INIT_BUFFER_SIZE: usize = 8192;
pub const MAX_BUFFER_SIZE: usize = 8192 + 4096 * 100;
pub struct Buffered<T> {
flush_pipeline: bool,
io: T,
read_blocked: bool,
read_buf: BytesMut,
write_buf: WriteBuf,
}
impl<T> fmt::Debug for Buffered<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Buffered")
.field("read_buf", &self.read_buf)
.field("write_buf", &self.write_buf)
.finish()
}
}
impl<T: AsyncRead + AsyncWrite> Buffered<T> {
pub fn new(io: T) -> Buffered<T> {
Buffered {
flush_pipeline: false,
io: io,
read_buf: BytesMut::with_capacity(0),
write_buf: WriteBuf::new(),
read_blocked: false,
}
}
pub fn set_flush_pipeline(&mut self, enabled: bool) {
self.flush_pipeline = enabled;
}
pub fn read_buf(&self) -> &[u8] {
self.read_buf.as_ref()
}
pub fn write_buf_mut(&mut self) -> &mut Vec<u8> {
self.write_buf.maybe_reset();
self.write_buf.maybe_reserve(0);
&mut self.write_buf.0.bytes
}
pub fn consume_leading_lines(&mut self) {
if !self.read_buf.is_empty() {
let mut i = 0;
while i < self.read_buf.len() {
match self.read_buf[i] {
b'\r' | b'\n' => i += 1,
_ => break,
}
}
self.read_buf.split_to(i);
}
}
pub fn parse<S: Http1Transaction>(&mut self) -> Poll<MessageHead<S::Incoming>, ::Error> {
loop {
match try!(S::parse(&mut self.read_buf)) {
Some(head) => {
//trace!("parsed {} bytes out of {}", len, self.read_buf.len());
return Ok(Async::Ready(head.0))
},
None => {
if self.read_buf.capacity() >= MAX_BUFFER_SIZE {
debug!("MAX_BUFFER_SIZE reached, closing");
return Err(::Error::TooLarge);
}
},
}
match try_ready!(self.read_from_io()) {
0 => {
trace!("parse eof");
//TODO: With Rust 1.14, this can be Error::from(ErrorKind)
return Err(io::Error::new(io::ErrorKind::UnexpectedEof, ParseEof).into());
}
_ => {},
}
}
}
pub fn read_from_io(&mut self) -> Poll<usize, io::Error> {
use bytes::BufMut;
self.read_blocked = false;
//TODO: use io.read_buf(), so we don't have to zero memory
//The reason this doesn't use it yet is that benchmarks show a slight
//**decrease** in performance. Switching should be done once it doesn't
//cost anything.
if self.read_buf.remaining_mut() < INIT_BUFFER_SIZE {
self.read_buf.reserve(INIT_BUFFER_SIZE);
unsafe { // Zero out unused memory
let buf = self.read_buf.bytes_mut();
let len = buf.len();
ptr::write_bytes(buf.as_mut_ptr(), 0, len);
}
}
unsafe {
let n = match self.io.read(self.read_buf.bytes_mut()) {
Ok(n) => n,
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
self.read_blocked = true;
return Ok(Async::NotReady);
}
return Err(e)
}
};
self.read_buf.advance_mut(n);
Ok(Async::Ready(n))
}
}
pub fn buffer<B: AsRef<[u8]>>(&mut self, buf: B) -> usize {
self.write_buf.buffer(buf.as_ref())
}
pub fn io_mut(&mut self) -> &mut T {
&mut self.io
}
pub fn is_read_blocked(&self) -> bool {
self.read_blocked
}
}
impl<T: Write> Write for Buffered<T> {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
let n = self.write_buf.buffer(data);
if n == 0 {
Err(io::ErrorKind::WouldBlock.into())
} else {
Ok(n)
}
}
fn flush(&mut self) -> io::Result<()> {
if self.flush_pipeline && self.read_buf.is_empty() {
Ok(())
} else if self.write_buf.remaining() == 0 {
self.io.flush()
} else {
loop {
let n = try!(self.write_buf.write_into(&mut self.io));
trace!("flushed {} bytes", n);
if self.write_buf.remaining() == 0 {
break;
}
}
self.io.flush()
}
}
}
pub trait MemRead {
fn read_mem(&mut self, len: usize) -> Poll<Bytes, io::Error>;
}
impl<T: AsyncRead + AsyncWrite> MemRead for Buffered<T> {
fn read_mem(&mut self, len: usize) -> Poll<Bytes, io::Error> {
trace!("Buffered.read_mem read_buf={}, wanted={}", self.read_buf.len(), len);
if !self.read_buf.is_empty() {
let n = ::std::cmp::min(len, self.read_buf.len());
trace!("Buffered.read_mem read_buf is not empty, slicing {}", n);
Ok(Async::Ready(self.read_buf.split_to(n).freeze()))
} else {
let n = try_ready!(self.read_from_io());
Ok(Async::Ready(self.read_buf.split_to(::std::cmp::min(len, n)).freeze()))
}
}
}
#[derive(Clone)]
pub struct Cursor<T> {
bytes: T,
pos: usize,
}
impl<T: AsRef<[u8]>> Cursor<T> {
pub fn new(bytes: T) -> Cursor<T> {
Cursor {
bytes: bytes,
pos: 0,
}
}
pub fn has_started(&self) -> bool {
self.pos != 0
}
pub fn is_written(&self) -> bool {
trace!("Cursor::is_written pos = {}, len = {}", self.pos, self.bytes.as_ref().len());
self.pos >= self.bytes.as_ref().len()
}
pub fn write_to<W: Write>(&mut self, dst: &mut W) -> io::Result<usize> {
if self.remaining() == 0 {
Ok(0)
} else {
dst.write(&self.bytes.as_ref()[self.pos..]).map(|n| {
self.pos += n;
n
})
}
}
fn remaining(&self) -> usize {
self.bytes.as_ref().len() - self.pos
}
#[inline]
pub fn buf(&self) -> &[u8] {
&self.bytes.as_ref()[self.pos..]
}
#[inline]
pub fn consume(&mut self, num: usize) {
trace!("Cursor::consume({})", num);
self.pos = ::std::cmp::min(self.bytes.as_ref().len(), self.pos + num);
}
}
impl<T: AsRef<[u8]>> fmt::Debug for Cursor<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Cursor")
.field("pos", &self.pos)
.field("len", &self.bytes.as_ref().len())
.finish()
}
}
pub trait AtomicWrite {
fn write_atomic(&mut self, data: &[&[u8]]) -> io::Result<usize>;
}
/*
#[cfg(not(windows))]
impl<T: Write + ::vecio::Writev> AtomicWrite for T {
fn write_atomic(&mut self, bufs: &[&[u8]]) -> io::Result<usize> {
self.writev(bufs)
}
}
#[cfg(windows)]
*/
impl<T: Write> AtomicWrite for T {
fn write_atomic(&mut self, bufs: &[&[u8]]) -> io::Result<usize> {
if bufs.len() == 1 {
self.write(bufs[0])
} else {
let vec = bufs.concat();
self.write(&vec)
}
}
}
//}
// an internal buffer to collect writes before flushes
#[derive(Debug)]
struct WriteBuf(Cursor<Vec<u8>>);
impl WriteBuf {
fn new() -> WriteBuf {
WriteBuf(Cursor::new(Vec::new()))
}
fn write_into<W: Write>(&mut self, w: &mut W) -> io::Result<usize> {
self.0.write_to(w)
}
fn buffer(&mut self, data: &[u8]) -> usize {
trace!("WriteBuf::buffer() len = {:?}", data.len());
self.maybe_reset();
self.maybe_reserve(data.len());
let vec = &mut self.0.bytes;
let len = cmp::min(vec.capacity() - vec.len(), data.len());
assert!(vec.capacity() - vec.len() >= len);
unsafe {
// in rust 1.9, we could use slice::copy_from_slice
ptr::copy(
data.as_ptr(),
vec.as_mut_ptr().offset(vec.len() as isize),
len
);
let new_len = vec.len() + len;
vec.set_len(new_len);
}
len
}
fn remaining(&self) -> usize {
self.0.remaining()
}
#[inline]
fn maybe_reserve(&mut self, needed: usize) {
let vec = &mut self.0.bytes;
let cap = vec.capacity();
if cap == 0 {
let init = cmp::min(MAX_BUFFER_SIZE, cmp::max(INIT_BUFFER_SIZE, needed));
trace!("WriteBuf reserving initial {}", init);
vec.reserve(init);
} else if cap < MAX_BUFFER_SIZE {
vec.reserve(cmp::min(needed, MAX_BUFFER_SIZE - cap));
trace!("WriteBuf reserved {}", vec.capacity() - cap);
}
}
fn maybe_reset(&mut self) {
if self.0.pos != 0 && self.0.remaining() == 0 {
self.0.pos = 0;
unsafe {
self.0.bytes.set_len(0);
}
}
}
}
#[derive(Debug)]
struct ParseEof;
impl fmt::Display for ParseEof {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("parse eof")
}
}
impl ::std::error::Error for ParseEof {
fn description(&self) -> &str {
"parse eof"
}
}
// TODO: Move tests to their own mod
#[cfg(test)]
use std::io::Read;
#[cfg(test)]
impl<T: Read> MemRead for ::mock::AsyncIo<T> {
fn read_mem(&mut self, len: usize) -> Poll<Bytes, io::Error> {
let mut v = vec![0; len];
let n = try_nb!(self.read(v.as_mut_slice()));
Ok(Async::Ready(BytesMut::from(&v[..n]).freeze()))
}
}
#[test]
fn test_iobuf_write_empty_slice() {
use mock::{AsyncIo, Buf as MockBuf};
let mut mock = AsyncIo::new(MockBuf::new(), 256);
mock.error(io::Error::new(io::ErrorKind::Other, "logic error"));
let mut io_buf = Buffered::new(mock);
// underlying io will return the logic error upon write,
// so we are testing that the io_buf does not trigger a write
// when there is nothing to flush
io_buf.flush().expect("should short-circuit flush");
}
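#[test]
fn test_write_buf_reset_after_drain() {
// An illustrative sketch (assumes only the WriteBuf items above): buffered bytes
// are drained by write_into, and maybe_reset rewinds the cursor so the space can
// be reused by the next write.
let mut wbuf = WriteBuf::new();
assert_eq!(wbuf.buffer(b"hello"), 5);
assert_eq!(wbuf.remaining(), 5);
let mut out = Vec::new();
assert_eq!(wbuf.write_into(&mut out).unwrap(), 5);
assert_eq!(out, b"hello");
assert_eq!(wbuf.remaining(), 0);
wbuf.maybe_reset();
assert!(!wbuf.0.has_started());
}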
#[test]
fn test_parse_reads_until_blocked() {
use mock::{AsyncIo, Buf as MockBuf};
// missing last line ending
let raw = "HTTP/1.1 200 OK\r\n";
let mock = AsyncIo::new(MockBuf::wrap(raw.into()), raw.len());
let mut buffered = Buffered::new(mock);
assert_eq!(buffered.parse::<super::ClientTransaction>().unwrap(), Async::NotReady);
assert!(buffered.io.blocked());
}
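#[test]
fn test_cursor_write_to() {
// An illustrative sketch (assumes only the Cursor API above): write_to drains the
// underlying bytes into the destination, and is_written reports completion.
let mut cursor = Cursor::new(b"hyper".to_vec());
assert!(!cursor.has_started());
let mut dst = Vec::new();
assert_eq!(cursor.write_to(&mut dst).unwrap(), 5);
assert_eq!(dst, b"hyper");
assert!(cursor.has_started());
assert!(cursor.is_written());
assert_eq!(cursor.write_to(&mut dst).unwrap(), 0);
}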

179
src/proto/mod.rs Normal file
View File

@@ -0,0 +1,179 @@
//! Pieces pertaining to the HTTP message protocol.
use std::borrow::Cow;
use std::fmt;
use bytes::BytesMut;
use header::{Connection, ConnectionOption, Expect};
use header::Headers;
use method::Method;
use status::StatusCode;
use uri::Uri;
use version::HttpVersion;
use version::HttpVersion::{Http10, Http11};
pub use self::conn::{Conn, KeepAlive, KA};
pub use self::body::{Body, TokioBody};
pub use self::chunk::Chunk;
mod body;
mod chunk;
mod conn;
mod io;
mod h1;
//mod h2;
pub mod request;
pub mod response;
/// An Incoming Message head. Includes request/status line, and headers.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct MessageHead<S> {
/// HTTP version of the message.
pub version: HttpVersion,
/// Subject (request line or status line) of Incoming message.
pub subject: S,
/// Headers of the Incoming message.
pub headers: Headers
}
/// An incoming request message.
pub type RequestHead = MessageHead<RequestLine>;
#[derive(Debug, Default, PartialEq)]
pub struct RequestLine(pub Method, pub Uri);
impl fmt::Display for RequestLine {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} {}", self.0, self.1)
}
}
/// An incoming response message.
pub type ResponseHead = MessageHead<RawStatus>;
impl<S> MessageHead<S> {
pub fn should_keep_alive(&self) -> bool {
should_keep_alive(self.version, &self.headers)
}
pub fn expecting_continue(&self) -> bool {
expecting_continue(self.version, &self.headers)
}
}
impl ResponseHead {
/// Converts this head's RawStatus into a StatusCode.
#[inline]
pub fn status(&self) -> StatusCode {
self.subject.status()
}
}
/// The raw status code and reason-phrase.
#[derive(Clone, PartialEq, Debug)]
pub struct RawStatus(pub u16, pub Cow<'static, str>);
impl RawStatus {
/// Converts this into a StatusCode.
#[inline]
pub fn status(&self) -> StatusCode {
StatusCode::try_from(self.0).unwrap()
}
}
impl fmt::Display for RawStatus {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} {}", self.0, self.1)
}
}
impl From<StatusCode> for RawStatus {
fn from(status: StatusCode) -> RawStatus {
RawStatus(status.into(), Cow::Borrowed(status.canonical_reason().unwrap_or("")))
}
}
impl Default for RawStatus {
fn default() -> RawStatus {
RawStatus(200, Cow::Borrowed("OK"))
}
}
impl From<MessageHead<::StatusCode>> for MessageHead<RawStatus> {
fn from(head: MessageHead<::StatusCode>) -> MessageHead<RawStatus> {
MessageHead {
subject: head.subject.into(),
version: head.version,
headers: head.headers,
}
}
}
/// Checks if a connection should be kept alive.
#[inline]
pub fn should_keep_alive(version: HttpVersion, headers: &Headers) -> bool {
let ret = match (version, headers.get::<Connection>()) {
(Http10, None) => false,
(Http10, Some(conn)) if !conn.contains(&ConnectionOption::KeepAlive) => false,
(Http11, Some(conn)) if conn.contains(&ConnectionOption::Close) => false,
_ => true
};
trace!("should_keep_alive(version={:?}, header={:?}) = {:?}", version, headers.get::<Connection>(), ret);
ret
}
/// Checks if a connection is expecting a `100 Continue` before sending its body.
#[inline]
pub fn expecting_continue(version: HttpVersion, headers: &Headers) -> bool {
let ret = match (version, headers.get::<Expect>()) {
(Http11, Some(&Expect::Continue)) => true,
_ => false
};
trace!("expecting_continue(version={:?}, header={:?}) = {:?}", version, headers.get::<Expect>(), ret);
ret
}
#[derive(Debug)]
pub enum ServerTransaction {}
#[derive(Debug)]
pub enum ClientTransaction {}
pub trait Http1Transaction {
type Incoming;
type Outgoing: Default;
fn parse(bytes: &mut BytesMut) -> ParseResult<Self::Incoming>;
fn decoder(head: &MessageHead<Self::Incoming>, method: &mut Option<::Method>) -> ::Result<h1::Decoder>;
fn encode(head: MessageHead<Self::Outgoing>, has_body: bool, method: &mut Option<Method>, dst: &mut Vec<u8>) -> h1::Encoder;
}
pub type ParseResult<T> = ::Result<Option<(MessageHead<T>, usize)>>;
#[test]
fn test_should_keep_alive() {
let mut headers = Headers::new();
assert!(!should_keep_alive(Http10, &headers));
assert!(should_keep_alive(Http11, &headers));
headers.set(Connection::close());
assert!(!should_keep_alive(Http10, &headers));
assert!(!should_keep_alive(Http11, &headers));
headers.set(Connection::keep_alive());
assert!(should_keep_alive(Http10, &headers));
assert!(should_keep_alive(Http11, &headers));
}
#[test]
fn test_expecting_continue() {
let mut headers = Headers::new();
assert!(!expecting_continue(Http10, &headers));
assert!(!expecting_continue(Http11, &headers));
headers.set(Expect::Continue);
assert!(!expecting_continue(Http10, &headers));
assert!(expecting_continue(Http11, &headers));
}
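#[test]
fn test_raw_status_conversions() {
// An illustrative sketch (assumes only the RawStatus items above): the default is
// 200 OK, and a StatusCode converts into its canonical reason phrase.
let raw = RawStatus::default();
assert_eq!(raw.0, 200);
assert_eq!(raw.status(), StatusCode::Ok);
let not_found: RawStatus = StatusCode::NotFound.into();
assert_eq!(not_found.0, 404);
assert_eq!(&*not_found.1, "Not Found");
}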

309
src/proto/request.rs Normal file
View File

@@ -0,0 +1,309 @@
use std::fmt;
#[cfg(feature = "compat")]
use std::mem::replace;
use std::net::SocketAddr;
#[cfg(feature = "compat")]
use http_types;
use header::Headers;
use proto::{Body, MessageHead, RequestHead, RequestLine};
use method::Method;
use uri::{self, Uri};
use version::HttpVersion;
/// An HTTP Request
pub struct Request<B = Body> {
method: Method,
uri: Uri,
version: HttpVersion,
headers: Headers,
body: Option<B>,
is_proxy: bool,
remote_addr: Option<SocketAddr>,
}
impl<B> Request<B> {
/// Construct a new Request.
#[inline]
pub fn new(method: Method, uri: Uri) -> Request<B> {
Request {
method: method,
uri: uri,
version: HttpVersion::default(),
headers: Headers::new(),
body: None,
is_proxy: false,
remote_addr: None,
}
}
/// Read the Request Uri.
#[inline]
pub fn uri(&self) -> &Uri { &self.uri }
/// Read the Request Version.
#[inline]
pub fn version(&self) -> HttpVersion { self.version }
/// Read the Request headers.
#[inline]
pub fn headers(&self) -> &Headers { &self.headers }
/// Read the Request method.
#[inline]
pub fn method(&self) -> &Method { &self.method }
/// Read the Request body.
#[inline]
pub fn body_ref(&self) -> Option<&B> { self.body.as_ref() }
/// The remote socket address of this request
///
/// This is an `Option`, because some underlying transports may not have
/// a socket address, such as Unix Sockets.
///
/// This field is not used for outgoing requests.
#[inline]
pub fn remote_addr(&self) -> Option<SocketAddr> { self.remote_addr }
/// The target path of this Request.
#[inline]
pub fn path(&self) -> &str {
self.uri.path()
}
/// The query string of this Request.
#[inline]
pub fn query(&self) -> Option<&str> {
self.uri.query()
}
/// Set the Method of this request.
#[inline]
pub fn set_method(&mut self, method: Method) { self.method = method; }
/// Get a mutable reference to the Request headers.
#[inline]
pub fn headers_mut(&mut self) -> &mut Headers { &mut self.headers }
/// Set the `Uri` of this request.
#[inline]
pub fn set_uri(&mut self, uri: Uri) { self.uri = uri; }
/// Set the `HttpVersion` of this request.
#[inline]
pub fn set_version(&mut self, version: HttpVersion) { self.version = version; }
/// Set the body of the request.
///
/// By default, the body will be sent using `Transfer-Encoding: chunked`. To
/// override this behavior, manually set a [`ContentLength`] header with the
/// length of `body`.
#[inline]
pub fn set_body<T: Into<B>>(&mut self, body: T) { self.body = Some(body.into()); }
/// Set that the URI should use the absolute form.
///
/// This is only needed when talking to HTTP/1 proxies to URLs not
/// protected by TLS.
#[inline]
pub fn set_proxy(&mut self, is_proxy: bool) { self.is_proxy = is_proxy; }
}
impl Request<Body> {
/// Deconstruct this Request into its pieces.
///
/// Modifying these pieces will have no effect on how hyper behaves.
#[inline]
pub fn deconstruct(self) -> (Method, Uri, HttpVersion, Headers, Body) {
(self.method, self.uri, self.version, self.headers, self.body.unwrap_or_default())
}
/// Take the Request body.
#[inline]
pub fn body(self) -> Body { self.body.unwrap_or_default() }
}
impl<B> fmt::Debug for Request<B> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Request")
.field("method", &self.method)
.field("uri", &self.uri)
.field("version", &self.version)
.field("remote_addr", &self.remote_addr)
.field("headers", &self.headers)
.finish()
}
}
#[cfg(feature = "compat")]
impl From<Request> for http_types::Request<Body> {
fn from(from_req: Request) -> http_types::Request<Body> {
let (m, u, v, h, b) = from_req.deconstruct();
let to_req = http_types::Request::new(());
let (mut to_parts, _) = to_req.into_parts();
to_parts.method = m.into();
to_parts.uri = u.into();
to_parts.version = v.into();
to_parts.headers = h.into();
http_types::Request::from_parts(to_parts, b)
}
}
#[cfg(feature = "compat")]
impl<B> From<http_types::Request<B>> for Request<B> {
fn from(from_req: http_types::Request<B>) -> Request<B> {
let (from_parts, body) = from_req.into_parts();
let mut to_req = Request::new(from_parts.method.into(), from_parts.uri.into());
to_req.set_version(from_parts.version.into());
replace(to_req.headers_mut(), from_parts.headers.into());
to_req.set_body(body);
to_req
}
}
/// Constructs a Request from a received RequestHead and its body
pub fn from_wire<B>(addr: Option<SocketAddr>, incoming: RequestHead, body: B) -> Request<B> {
let MessageHead { version, subject: RequestLine(method, uri), headers } = incoming;
Request::<B> {
method: method,
uri: uri,
headers: headers,
version: version,
remote_addr: addr,
body: Some(body),
is_proxy: false,
}
}
pub fn split<B>(req: Request<B>) -> (RequestHead, Option<B>) {
let uri = if req.is_proxy {
req.uri
} else {
uri::origin_form(&req.uri)
};
let head = RequestHead {
subject: ::proto::RequestLine(req.method, uri),
headers: req.headers,
version: req.version,
};
(head, req.body)
}
#[cfg(test)]
mod tests {
/*
use std::io::Write;
use std::str::from_utf8;
use Url;
use method::Method::{Get, Head, Post};
use mock::{MockStream, MockConnector};
use net::Fresh;
use header::{ContentLength,TransferEncoding,Encoding};
use url::form_urlencoded;
use super::Request;
use http::h1::Http11Message;
fn run_request(req: Request<Fresh>) -> Vec<u8> {
let req = req.start().unwrap();
let message = req.message;
let mut message = message.downcast::<Http11Message>().ok().unwrap();
message.flush_outgoing().unwrap();
let stream = *message
.into_inner().downcast::<MockStream>().ok().unwrap();
stream.write
}
fn assert_no_body(s: &str) {
assert!(!s.contains("Content-Length:"));
assert!(!s.contains("Transfer-Encoding:"));
}
#[test]
fn test_get_empty_body() {
let req = Request::with_connector(
Get, Url::parse("http://example.dom").unwrap(), &mut MockConnector
).unwrap();
let bytes = run_request(req);
let s = from_utf8(&bytes[..]).unwrap();
assert_no_body(s);
}
#[test]
fn test_head_empty_body() {
let req = Request::with_connector(
Head, Url::parse("http://example.dom").unwrap(), &mut MockConnector
).unwrap();
let bytes = run_request(req);
let s = from_utf8(&bytes[..]).unwrap();
assert_no_body(s);
}
#[test]
fn test_url_query() {
let url = Url::parse("http://example.dom?q=value").unwrap();
let req = Request::with_connector(
Get, url, &mut MockConnector
).unwrap();
let bytes = run_request(req);
let s = from_utf8(&bytes[..]).unwrap();
assert!(s.contains("?q=value"));
}
#[test]
fn test_post_content_length() {
let url = Url::parse("http://example.dom").unwrap();
let mut req = Request::with_connector(
Post, url, &mut MockConnector
).unwrap();
let mut body = String::new();
form_urlencoded::Serializer::new(&mut body).append_pair("q", "value");
req.headers_mut().set(ContentLength(body.len() as u64));
let bytes = run_request(req);
let s = from_utf8(&bytes[..]).unwrap();
assert!(s.contains("Content-Length:"));
}
#[test]
fn test_post_chunked() {
let url = Url::parse("http://example.dom").unwrap();
let req = Request::with_connector(
Post, url, &mut MockConnector
).unwrap();
let bytes = run_request(req);
let s = from_utf8(&bytes[..]).unwrap();
assert!(!s.contains("Content-Length:"));
}
#[test]
fn test_host_header() {
let url = Url::parse("http://example.dom").unwrap();
let req = Request::with_connector(
Get, url, &mut MockConnector
).unwrap();
let bytes = run_request(req);
let s = from_utf8(&bytes[..]).unwrap();
assert!(s.contains("Host: example.dom"));
}
#[test]
fn test_proxy() {
let url = Url::parse("http://example.dom").unwrap();
let mut req = Request::with_connector(
Get, url, &mut MockConnector
).unwrap();
req.message.set_proxied(true);
let bytes = run_request(req);
let s = from_utf8(&bytes[..]).unwrap();
let request_line = "GET http://example.dom/ HTTP/1.1";
assert_eq!(&s[..request_line.len()], request_line);
assert!(s.contains("Host: example.dom"));
}
*/
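#[test]
fn test_split_uses_origin_form() {
// An illustrative sketch (assumes only the `Request` and `split` items above):
// a non-proxy request is reduced to the origin-form of its Uri for the head.
use super::{split, Request};
let req: Request = Request::new(::Method::Get, "http://hyper.rs/echo".parse().unwrap());
let (head, body) = split(req);
assert_eq!(head.subject.0, ::Method::Get);
assert_eq!(head.subject.1.path(), "/echo");
assert!(body.is_none());
}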
}

212
src/proto/response.rs Normal file
View File

@@ -0,0 +1,212 @@
use std::fmt;
#[cfg(feature = "compat")]
use std::mem::replace;
#[cfg(feature = "compat")]
use http_types;
use header::{Header, Headers};
use proto::{MessageHead, ResponseHead, Body};
use status::StatusCode;
use version::HttpVersion;
/// An HTTP Response
pub struct Response<B = Body> {
version: HttpVersion,
headers: Headers,
status: StatusCode,
#[cfg(feature = "raw_status")]
raw_status: ::proto::RawStatus,
body: Option<B>,
}
impl<B> Response<B> {
/// Constructs a default response
#[inline]
pub fn new() -> Response<B> {
Response::default()
}
/// Get the HTTP version of this response.
#[inline]
pub fn version(&self) -> HttpVersion { self.version }
/// Get the headers from the response.
#[inline]
pub fn headers(&self) -> &Headers { &self.headers }
/// Get a mutable reference to the headers.
#[inline]
pub fn headers_mut(&mut self) -> &mut Headers { &mut self.headers }
/// Get the status from the server.
#[inline]
pub fn status(&self) -> StatusCode { self.status }
/// Get the raw status code and reason.
///
/// This method is only useful when inspecting the raw subject line from
/// a received response.
#[inline]
#[cfg(feature = "raw_status")]
pub fn status_raw(&self) -> &::proto::RawStatus { &self.raw_status }
/// Set the `StatusCode` for this response.
#[inline]
pub fn set_status(&mut self, status: StatusCode) {
self.status = status;
}
/// Set the status and move the Response.
///
/// Useful for the "builder-style" pattern.
#[inline]
pub fn with_status(mut self, status: StatusCode) -> Self {
self.set_status(status);
self
}
/// Set a header and move the Response.
///
/// Useful for the "builder-style" pattern.
#[inline]
pub fn with_header<H: Header>(mut self, header: H) -> Self {
self.headers.set(header);
self
}
/// Set the headers and move the Response.
///
/// Useful for the "builder-style" pattern.
#[inline]
pub fn with_headers(mut self, headers: Headers) -> Self {
self.headers = headers;
self
}
/// Set the body.
#[inline]
pub fn set_body<T: Into<B>>(&mut self, body: T) {
self.body = Some(body.into());
}
/// Set the body and move the Response.
///
/// Useful for the "builder-style" pattern.
#[inline]
pub fn with_body<T: Into<B>>(mut self, body: T) -> Self {
self.set_body(body);
self
}
/// Read the body.
#[inline]
pub fn body_ref(&self) -> Option<&B> { self.body.as_ref() }
}
impl Response<Body> {
/// Take the `Body` of this response.
#[inline]
pub fn body(self) -> Body {
self.body.unwrap_or_default()
}
}
#[cfg(not(feature = "raw_status"))]
impl<B> Default for Response<B> {
fn default() -> Response<B> {
Response::<B> {
version: Default::default(),
headers: Default::default(),
status: Default::default(),
body: None,
}
}
}
#[cfg(feature = "raw_status")]
impl<B> Default for Response<B> {
fn default() -> Response<B> {
Response::<B> {
version: Default::default(),
headers: Default::default(),
status: Default::default(),
raw_status: Default::default(),
body: None,
}
}
}
impl fmt::Debug for Response {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Response")
.field("status", &self.status)
.field("version", &self.version)
.field("headers", &self.headers)
.finish()
}
}
#[cfg(feature = "compat")]
impl<B> From<http_types::Response<B>> for Response<B> {
fn from(from_res: http_types::Response<B>) -> Response<B> {
let (from_parts, body) = from_res.into_parts();
let mut to_res = Response::new();
to_res.version = from_parts.version.into();
to_res.set_status(from_parts.status.into());
replace(to_res.headers_mut(), from_parts.headers.into());
to_res.with_body(body)
}
}
#[cfg(feature = "compat")]
impl From<Response> for http_types::Response<Body> {
fn from(mut from_res: Response) -> http_types::Response<Body> {
let (mut to_parts, ()) = http_types::Response::new(()).into_parts();
to_parts.version = from_res.version().into();
to_parts.status = from_res.status().into();
let from_headers = replace(from_res.headers_mut(), Headers::new());
to_parts.headers = from_headers.into();
http_types::Response::from_parts(to_parts, from_res.body())
}
}
/// Constructs a response using a received ResponseHead and optional body
#[inline]
#[cfg(not(feature = "raw_status"))]
pub fn from_wire<B>(incoming: ResponseHead, body: Option<B>) -> Response<B> {
let status = incoming.status();
Response::<B> {
status: status,
version: incoming.version,
headers: incoming.headers,
body: body,
}
}
/// Constructs a response using a received ResponseHead and optional body
#[inline]
#[cfg(feature = "raw_status")]
pub fn from_wire<B>(incoming: ResponseHead, body: Option<B>) -> Response<B> {
let status = incoming.status();
Response::<B> {
status: status,
version: incoming.version,
headers: incoming.headers,
raw_status: incoming.subject,
body: body,
}
}
/// Splits this response into a MessageHead<StatusCode> and its body
#[inline]
pub fn split<B>(res: Response<B>) -> (MessageHead<StatusCode>, Option<B>) {
let head = MessageHead::<StatusCode> {
version: res.version,
headers: res.headers,
subject: res.status
};
(head, res.body)
}