feat(lib): switch to non-blocking (asynchronous) IO

BREAKING CHANGE: This breaks a lot of the Client and Server APIs.
  Check the documentation for how Handlers can be used for asynchronous
  events.
Author: Sean McArthur
Date: 2016-05-03 20:45:43 -07:00
parent 1ec56fe6b6
commit d35992d019
65 changed files with 5599 additions and 5023 deletions

src/http/h1/decode.rs — new file, 293 additions

@@ -0,0 +1,293 @@
use std::cmp;
use std::io::{self, Read};
use self::Kind::{Length, Chunked, Eof};
/// Decoders to handle different Transfer-Encodings.
///
/// If a message body does not include a Transfer-Encoding, it *should*
/// include a Content-Length header.
#[derive(Debug, Clone)]
pub struct Decoder {
kind: Kind,
}
impl Decoder {
pub fn length(x: u64) -> Decoder {
Decoder {
kind: Kind::Length(x)
}
}
pub fn chunked() -> Decoder {
Decoder {
kind: Kind::Chunked(None)
}
}
pub fn eof() -> Decoder {
Decoder {
kind: Kind::Eof(false)
}
}
}
#[derive(Debug, Clone)]
enum Kind {
/// A Reader used when a Content-Length header is passed with a positive integer.
Length(u64),
/// A Reader used when Transfer-Encoding is `chunked`.
Chunked(Option<u64>),
/// A Reader used for responses that indicate neither a content length nor
/// chunked transfer encoding.
///
/// Note: This should only be used for `Response`s. It is illegal for a
/// `Request` to be made with both `Content-Length` and
/// `Transfer-Encoding: chunked` missing, as explained in the spec:
///
/// > If a Transfer-Encoding header field is present in a response and
/// > the chunked transfer coding is not the final encoding, the
/// > message body length is determined by reading the connection until
/// > it is closed by the server. If a Transfer-Encoding header field
/// > is present in a request and the chunked transfer coding is not
/// > the final encoding, the message body length cannot be determined
/// > reliably; the server MUST respond with the 400 (Bad Request)
/// > status code and then close the connection.
Eof(bool),
}
impl Decoder {
pub fn is_eof(&self) -> bool {
trace!("is_eof? {:?}", self);
match self.kind {
Length(0) |
Chunked(Some(0)) |
Eof(true) => true,
_ => false
}
}
}
impl Decoder {
pub fn decode<R: Read>(&mut self, body: &mut R, buf: &mut [u8]) -> io::Result<usize> {
match self.kind {
Length(ref mut remaining) => {
trace!("Sized read, remaining={:?}", remaining);
if *remaining == 0 {
Ok(0)
} else {
let to_read = cmp::min(*remaining as usize, buf.len());
let num = try!(body.read(&mut buf[..to_read])) as u64;
trace!("Length read: {}", num);
if num > *remaining {
*remaining = 0;
} else if num == 0 {
return Err(io::Error::new(io::ErrorKind::Other, "early eof"));
} else {
*remaining -= num;
}
Ok(num as usize)
}
},
Chunked(ref mut opt_remaining) => {
let mut rem = match *opt_remaining {
Some(ref rem) => *rem,
// None means we don't know the size of the next chunk
None => try!(read_chunk_size(body))
};
trace!("Chunked read, remaining={:?}", rem);
if rem == 0 {
*opt_remaining = Some(0);
// chunk of size 0 signals the end of the chunked stream
// if the 0 digit was missing from the stream, it would
// be an InvalidInput error instead.
trace!("end of chunked");
return Ok(0)
}
let to_read = cmp::min(rem as usize, buf.len());
let count = try!(body.read(&mut buf[..to_read])) as u64;
if count == 0 {
*opt_remaining = Some(0);
return Err(io::Error::new(io::ErrorKind::Other, "early eof"));
}
rem -= count;
*opt_remaining = if rem > 0 {
Some(rem)
} else {
try!(eat(body, b"\r\n"));
None
};
Ok(count as usize)
},
Eof(ref mut is_eof) => {
match body.read(buf) {
Ok(0) => {
*is_eof = true;
Ok(0)
}
other => other
}
},
}
}
}
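/// Reads from `rdr` and checks that the bytes read exactly match `bytes`,
/// erroring with `InvalidInput` on any mismatch or short read.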
fn eat<R: Read>(rdr: &mut R, bytes: &[u8]) -> io::Result<()> {
let mut buf = [0];
for &b in bytes.iter() {
match try!(rdr.read(&mut buf)) {
1 if buf[0] == b => (),
_ => return Err(io::Error::new(io::ErrorKind::InvalidInput,
"Invalid characters found")),
}
}
Ok(())
}
/// Chunked chunks start with 1*HEXDIGIT, indicating the size of the chunk.
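/// e.g. `b"1e; name=value\r\n"` parses to `Ok(30)`; the extension is ignored.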
fn read_chunk_size<R: Read>(rdr: &mut R) -> io::Result<u64> {
macro_rules! byte (
($rdr:ident) => ({
let mut buf = [0];
match try!($rdr.read(&mut buf)) {
1 => buf[0],
_ => return Err(io::Error::new(io::ErrorKind::InvalidInput,
"Invalid chunk size line")),
}
})
);
let mut size = 0u64;
let radix = 16;
let mut in_ext = false;
let mut in_chunk_size = true;
loop {
match byte!(rdr) {
b@b'0'...b'9' if in_chunk_size => {
size *= radix;
size += (b - b'0') as u64;
},
b@b'a'...b'f' if in_chunk_size => {
size *= radix;
size += (b + 10 - b'a') as u64;
},
b@b'A'...b'F' if in_chunk_size => {
size *= radix;
size += (b + 10 - b'A') as u64;
},
b'\r' => {
match byte!(rdr) {
b'\n' => break,
_ => return Err(io::Error::new(io::ErrorKind::InvalidInput,
"Invalid chunk size line"))
}
},
// If we weren't in the extension yet, the ";" signals its start
b';' if !in_ext => {
in_ext = true;
in_chunk_size = false;
},
// "Linear white space" is ignored between the chunk size and the
// extension separator token (";") due to the "implied *LWS rule".
b'\t' | b' ' if !in_ext && !in_chunk_size => {},
// LWS can follow the chunk size, but no more digits can come
b'\t' | b' ' if in_chunk_size => in_chunk_size = false,
// We allow any arbitrary octet once we are in the extension, since
// they all get ignored anyway. According to the HTTP spec, valid
// extensions would have a more strict syntax:
// (token ["=" (token | quoted-string)])
// but we gain nothing by rejecting an otherwise valid chunk size.
_ext if in_ext => {
//TODO: chunk extension byte;
},
// Finally, if we aren't in the extension and we're reading any
// other octet, the chunk size line is invalid!
_ => {
return Err(io::Error::new(io::ErrorKind::InvalidInput,
"Invalid chunk size line"));
}
}
}
trace!("chunk size={:?}", size);
Ok(size)
}
#[cfg(test)]
mod tests {
use std::error::Error;
use std::io;
use super::{Decoder, read_chunk_size};
#[test]
fn test_read_chunk_size() {
fn read(s: &str, result: u64) {
assert_eq!(read_chunk_size(&mut s.as_bytes()).unwrap(), result);
}
fn read_err(s: &str) {
assert_eq!(read_chunk_size(&mut s.as_bytes()).unwrap_err().kind(),
io::ErrorKind::InvalidInput);
}
read("1\r\n", 1);
read("01\r\n", 1);
read("0\r\n", 0);
read("00\r\n", 0);
read("A\r\n", 10);
read("a\r\n", 10);
read("Ff\r\n", 255);
read("Ff \r\n", 255);
// Missing LF or CRLF
read_err("F\rF");
read_err("F");
// Invalid hex digit
read_err("X\r\n");
read_err("1X\r\n");
read_err("-\r\n");
read_err("-1\r\n");
// Acceptable (if not fully valid) extensions do not influence the size
read("1;extension\r\n", 1);
read("a;ext name=value\r\n", 10);
read("1;extension;extension2\r\n", 1);
read("1;;; ;\r\n", 1);
read("2; extension...\r\n", 2);
read("3 ; extension=123\r\n", 3);
read("3 ;\r\n", 3);
read("3 ; \r\n", 3);
// Invalid extensions cause an error
read_err("1 invalid extension\r\n");
read_err("1 A\r\n");
read_err("1;no CRLF");
}
#[test]
fn test_read_sized_early_eof() {
let mut bytes = &b"foo bar"[..];
let mut decoder = Decoder::length(10);
let mut buf = [0u8; 10];
assert_eq!(decoder.decode(&mut bytes, &mut buf).unwrap(), 7);
let e = decoder.decode(&mut bytes, &mut buf).unwrap_err();
assert_eq!(e.kind(), io::ErrorKind::Other);
assert_eq!(e.description(), "early eof");
}
#[test]
fn test_read_chunked_early_eof() {
let mut bytes = &b"\
9\r\n\
foo bar\
"[..];
let mut decoder = Decoder::chunked();
let mut buf = [0u8; 10];
assert_eq!(decoder.decode(&mut bytes, &mut buf).unwrap(), 7);
let e = decoder.decode(&mut bytes, &mut buf).unwrap_err();
assert_eq!(e.kind(), io::ErrorKind::Other);
assert_eq!(e.description(), "early eof");
}
}
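
A usage sketch (not part of this commit; the `drain` helper is hypothetical): a caller pumps a `Decoder` until it reports the end of the body, which every kind signals by returning `Ok(0)`:

use std::io::{self, Read};

fn drain<R: Read>(decoder: &mut Decoder, body: &mut R) -> io::Result<Vec<u8>> {
    // Repeatedly decode into a scratch buffer; Ok(0) means the body is done
    // (Length exhausted, the 0-size chunk seen, or EOF reached).
    let mut out = Vec::new();
    let mut buf = [0u8; 256];
    loop {
        let n = try!(decoder.decode(body, &mut buf));
        if n == 0 {
            return Ok(out);
        }
        out.extend_from_slice(&buf[..n]);
    }
}

// e.g. drain(&mut Decoder::chunked(), &mut &b"7\r\nfoo bar\r\n0\r\n\r\n"[..])
// yields b"foo bar".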

src/http/h1/encode.rs — new file, 371 additions

@@ -0,0 +1,371 @@
use std::borrow::Cow;
use std::cmp;
use std::io::{self, Write};
use http::internal::{AtomicWrite, WriteBuf};
/// Encoders to handle different Transfer-Encodings.
#[derive(Debug, Clone)]
pub struct Encoder {
kind: Kind,
prefix: Prefix, //Option<WriteBuf<Vec<u8>>>
}
#[derive(Debug, PartialEq, Clone)]
enum Kind {
/// An Encoder for when Transfer-Encoding includes `chunked`.
Chunked(Chunked),
/// An Encoder for when Content-Length is set.
///
/// Enforces that the body is not longer than the Content-Length header.
Length(u64),
}
impl Encoder {
pub fn chunked() -> Encoder {
Encoder {
kind: Kind::Chunked(Chunked::Init),
prefix: Prefix(None)
}
}
pub fn length(len: u64) -> Encoder {
Encoder {
kind: Kind::Length(len),
prefix: Prefix(None)
}
}
pub fn prefix(&mut self, prefix: WriteBuf<Vec<u8>>) {
self.prefix.0 = Some(prefix);
}
pub fn is_eof(&self) -> bool {
if self.prefix.0.is_some() {
return false;
}
match self.kind {
Kind::Length(0) |
Kind::Chunked(Chunked::End) => true,
_ => false
}
}
pub fn end(self) -> Option<WriteBuf<Cow<'static, [u8]>>> {
let trailer = self.trailer();
let buf = self.prefix.0;
match (buf, trailer) {
(Some(mut buf), Some(trailer)) => {
buf.bytes.extend_from_slice(trailer);
Some(WriteBuf {
bytes: Cow::Owned(buf.bytes),
pos: buf.pos,
})
},
(Some(buf), None) => Some(WriteBuf {
bytes: Cow::Owned(buf.bytes),
pos: buf.pos
}),
(None, Some(trailer)) => {
Some(WriteBuf {
bytes: Cow::Borrowed(trailer),
pos: 0,
})
},
(None, None) => None
}
}
fn trailer(&self) -> Option<&'static [u8]> {
match self.kind {
Kind::Chunked(Chunked::Init) => {
Some(b"0\r\n\r\n")
}
_ => None
}
}
pub fn encode<W: AtomicWrite>(&mut self, w: &mut W, msg: &[u8]) -> io::Result<usize> {
match self.kind {
Kind::Chunked(ref mut chunked) => {
chunked.encode(w, &mut self.prefix, msg)
},
Kind::Length(ref mut remaining) => {
let mut n = {
let max = cmp::min(*remaining as usize, msg.len());
let slice = &msg[..max];
let prefix = self.prefix.0.as_ref().map(|buf| &buf.bytes[buf.pos..]).unwrap_or(b"");
try!(w.write_atomic(&[prefix, slice]))
};
n = self.prefix.update(n);
if n == 0 {
return Err(io::Error::new(io::ErrorKind::WouldBlock, "would block"));
}
*remaining -= n as u64;
Ok(n)
},
}
}
}
#[derive(Debug, PartialEq, Clone)]
enum Chunked {
Init,
Size(ChunkSize),
SizeCr,
SizeLf,
Body(usize),
BodyCr,
BodyLf,
End,
}
impl Chunked {
fn encode<W: AtomicWrite>(&mut self, w: &mut W, prefix: &mut Prefix, msg: &[u8]) -> io::Result<usize> {
match *self {
Chunked::Init => {
let mut size = ChunkSize {
bytes: [0; CHUNK_SIZE_MAX_BYTES],
pos: 0,
len: 0,
};
trace!("chunked write, size = {:?}", msg.len());
write!(&mut size, "{:X}", msg.len())
.expect("CHUNK_SIZE_MAX_BYTES should fit any usize");
*self = Chunked::Size(size);
}
Chunked::End => return Ok(0),
_ => {}
}
let mut n = {
let pieces = match *self {
Chunked::Init => unreachable!("Chunked::Init should have become Chunked::Size"),
Chunked::Size(ref size) => [
prefix.0.as_ref().map(|buf| &buf.bytes[buf.pos..]).unwrap_or(b""),
&size.bytes[size.pos.into() .. size.len.into()],
&b"\r\n"[..],
msg,
&b"\r\n"[..],
],
Chunked::SizeCr => [
&b""[..],
&b""[..],
&b"\r\n"[..],
msg,
&b"\r\n"[..],
],
Chunked::SizeLf => [
&b""[..],
&b""[..],
&b"\n"[..],
msg,
&b"\r\n"[..],
],
Chunked::Body(pos) => [
&b""[..],
&b""[..],
&b""[..],
&msg[pos..],
&b"\r\n"[..],
],
Chunked::BodyCr => [
&b""[..],
&b""[..],
&b""[..],
&b""[..],
&b"\r\n"[..],
],
Chunked::BodyLf => [
&b""[..],
&b""[..],
&b""[..],
&b""[..],
&b"\n"[..],
],
Chunked::End => unreachable!("Chunked::End shouldn't write more")
};
try!(w.write_atomic(&pieces))
};
if n > 0 {
n = prefix.update(n);
}
while n > 0 {
match *self {
Chunked::Init => unreachable!("Chunked::Init should have become Chunked::Size"),
Chunked::Size(mut size) => {
n = size.update(n);
if size.len == 0 {
*self = Chunked::SizeCr;
} else {
*self = Chunked::Size(size);
}
},
Chunked::SizeCr => {
*self = Chunked::SizeLf;
n -= 1;
}
Chunked::SizeLf => {
*self = Chunked::Body(0);
n -= 1;
}
Chunked::Body(pos) => {
let left = msg.len() - pos;
if n >= left {
*self = Chunked::BodyCr;
n -= left;
} else {
*self = Chunked::Body(pos + n);
n = 0;
}
}
Chunked::BodyCr => {
*self = Chunked::BodyLf;
n -= 1;
}
Chunked::BodyLf => {
assert!(n == 1);
*self = if msg.len() == 0 {
Chunked::End
} else {
Chunked::Init
};
n = 0;
},
Chunked::End => unreachable!("Chunked::End shouldn't have any to write")
}
}
match *self {
Chunked::Init |
Chunked::End => Ok(msg.len()),
_ => Err(io::Error::new(io::ErrorKind::WouldBlock, "chunked incomplete"))
}
}
}
#[cfg(target_pointer_width = "32")]
const USIZE_BYTES: usize = 4;
#[cfg(target_pointer_width = "64")]
const USIZE_BYTES: usize = 8;
// each byte of the usize becomes two hex digits
const CHUNK_SIZE_MAX_BYTES: usize = USIZE_BYTES * 2;
#[derive(Clone, Copy)]
struct ChunkSize {
bytes: [u8; CHUNK_SIZE_MAX_BYTES],
pos: u8,
len: u8,
}
impl ChunkSize {
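/// Advances past `n` freshly written bytes, consuming the remaining bytes of
/// the hex size line first; returns how many of the `n` bytes were left over.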
fn update(&mut self, n: usize) -> usize {
let diff = (self.len - self.pos).into();
if n >= diff {
self.pos = 0;
self.len = 0;
n - diff
} else {
self.pos += n as u8; // just verified it was a small usize
0
}
}
}
impl ::std::fmt::Debug for ChunkSize {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
f.debug_struct("ChunkSize")
.field("bytes", &&self.bytes[..self.len.into()])
.field("pos", &self.pos)
.finish()
}
}
impl ::std::cmp::PartialEq for ChunkSize {
fn eq(&self, other: &ChunkSize) -> bool {
self.len == other.len &&
self.pos == other.pos &&
(&self.bytes[..]) == (&other.bytes[..])
}
}
impl io::Write for ChunkSize {
fn write(&mut self, msg: &[u8]) -> io::Result<usize> {
let n = (&mut self.bytes[self.len.into() ..]).write(msg)
.expect("&mut [u8].write() cannot error");
self.len += n as u8; // safe because bytes is never bigger than 256
Ok(n)
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
#[derive(Debug, Clone)]
struct Prefix(Option<WriteBuf<Vec<u8>>>);
impl Prefix {
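/// Consumes `n` freshly written bytes from the buffered prefix first,
/// returning how many of the `n` bytes were left over for the body.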
fn update(&mut self, n: usize) -> usize {
if let Some(mut buf) = self.0.take() {
if buf.bytes.len() - buf.pos > n {
buf.pos += n;
self.0 = Some(buf);
0
} else {
let nbuf = buf.bytes.len() - buf.pos;
n - nbuf
}
} else {
n
}
}
}
#[cfg(test)]
mod tests {
use super::Encoder;
use mock::{Async, Buf};
#[test]
fn test_write_chunked_sync() {
let mut dst = Buf::new();
let mut encoder = Encoder::chunked();
encoder.encode(&mut dst, b"foo bar").unwrap();
encoder.encode(&mut dst, b"baz quux herp").unwrap();
encoder.encode(&mut dst, b"").unwrap();
assert_eq!(&dst[..], &b"7\r\nfoo bar\r\nD\r\nbaz quux herp\r\n0\r\n\r\n"[..]);
}
#[test]
fn test_write_chunked_async() {
let mut dst = Async::new(Buf::new(), 7);
let mut encoder = Encoder::chunked();
assert!(encoder.encode(&mut dst, b"foo bar").is_err());
dst.block_in(6);
assert_eq!(7, encoder.encode(&mut dst, b"foo bar").unwrap());
dst.block_in(30);
assert_eq!(13, encoder.encode(&mut dst, b"baz quux herp").unwrap());
encoder.encode(&mut dst, b"").unwrap();
assert_eq!(&dst[..], &b"7\r\nfoo bar\r\nD\r\nbaz quux herp\r\n0\r\n\r\n"[..]);
}
#[test]
fn test_write_sized() {
let mut dst = Buf::new();
let mut encoder = Encoder::length(8);
encoder.encode(&mut dst, b"foo bar").unwrap();
assert_eq!(encoder.encode(&mut dst, b"baz").unwrap(), 1);
assert_eq!(dst, b"foo barb");
}
}
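
To make the chunked wire format concrete: each non-empty write is framed as `<hex size>\r\n<data>\r\n`, and the empty final write yields the `0\r\n\r\n` terminator. An illustrative framer (not part of this commit) reproducing the bytes asserted in the tests above:

fn chunk_frame(msg: &[u8]) -> Vec<u8> {
    // "<hex size>\r\n" header, the data, then a trailing CRLF; an empty
    // message naturally produces the "0\r\n\r\n" terminator frame.
    let mut out = format!("{:X}\r\n", msg.len()).into_bytes();
    out.extend_from_slice(msg);
    out.extend_from_slice(b"\r\n");
    out
}

// chunk_frame(b"foo bar")       == b"7\r\nfoo bar\r\n"
// chunk_frame(b"baz quux herp") == b"D\r\nbaz quux herp\r\n"
// chunk_frame(b"")              == b"0\r\n\r\n"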

src/http/h1/mod.rs — new file, 136 additions

@@ -0,0 +1,136 @@
/*
use std::fmt;
use std::io::{self, Write};
use std::marker::PhantomData;
use std::sync::mpsc;
use url::Url;
use tick;
use time::now_utc;
use header::{self, Headers};
use http::{self, conn};
use method::Method;
use net::{Fresh, Streaming};
use status::StatusCode;
use version::HttpVersion;
*/
pub use self::decode::Decoder;
pub use self::encode::Encoder;
pub use self::parse::parse;
mod decode;
mod encode;
mod parse;
/*
fn should_have_response_body(method: &Method, status: u16) -> bool {
trace!("should_have_response_body({:?}, {})", method, status);
match (method, status) {
(&Method::Head, _) |
(_, 100...199) |
(_, 204) |
(_, 304) |
(&Method::Connect, 200...299) => false,
_ => true
}
}
*/
/*
const MAX_INVALID_RESPONSE_BYTES: usize = 1024 * 128;
impl HttpMessage for Http11Message {
fn get_incoming(&mut self) -> ::Result<ResponseHead> {
unimplemented!();
/*
try!(self.flush_outgoing());
let stream = match self.stream.take() {
Some(stream) => stream,
None => {
// The message was already in the reading state...
// TODO Decide what happens in case we try to get a new incoming at that point
return Err(From::from(
io::Error::new(io::ErrorKind::Other,
"Read already in progress")));
}
};
let expected_no_content = stream.previous_response_expected_no_content();
trace!("previous_response_expected_no_content = {}", expected_no_content);
let mut stream = BufReader::new(stream);
let mut invalid_bytes_read = 0;
let head;
loop {
head = match parse_response(&mut stream) {
Ok(head) => head,
Err(::Error::Version)
if expected_no_content && invalid_bytes_read < MAX_INVALID_RESPONSE_BYTES => {
trace!("expected_no_content, found content");
invalid_bytes_read += 1;
stream.consume(1);
continue;
}
Err(e) => {
self.stream = Some(stream.into_inner());
return Err(e);
}
};
break;
}
let raw_status = head.subject;
let headers = head.headers;
let method = self.method.take().unwrap_or(Method::Get);
let is_empty = !should_have_response_body(&method, raw_status.0);
stream.get_mut().set_previous_response_expected_no_content(is_empty);
// According to https://tools.ietf.org/html/rfc7230#section-3.3.3
// 1. HEAD responses, and Status 1xx, 204, and 304 cannot have a body.
// 2. Status 2xx to a CONNECT cannot have a body.
// 3. Transfer-Encoding: chunked has a chunked body.
// 4. If multiple differing Content-Length headers or invalid, close connection.
// 5. Content-Length header has a sized body.
// 6. Not Client.
// 7. Read till EOF.
self.reader = Some(if is_empty {
SizedReader(stream, 0)
} else {
if let Some(&TransferEncoding(ref codings)) = headers.get() {
if codings.last() == Some(&Chunked) {
ChunkedReader(stream, None)
} else {
trace!("not chuncked. read till eof");
EofReader(stream)
}
} else if let Some(&ContentLength(len)) = headers.get() {
SizedReader(stream, len)
} else if headers.has::<ContentLength>() {
trace!("illegal Content-Length: {:?}", headers.get_raw("Content-Length"));
return Err(Error::Header);
} else {
trace!("neither Transfer-Encoding nor Content-Length");
EofReader(stream)
}
});
trace!("Http11Message.reader = {:?}", self.reader);
Ok(ResponseHead {
headers: headers,
raw_status: raw_status,
version: head.version,
})
*/
}
}
*/

src/http/h1/parse.rs — new file, 246 additions

@@ -0,0 +1,246 @@
use std::borrow::Cow;
use std::io::Write;
use httparse;
use header::{self, Headers, ContentLength, TransferEncoding};
use http::{MessageHead, RawStatus, Http1Message, ParseResult, Next, ServerMessage, ClientMessage, Next_, RequestLine};
use http::h1::{Encoder, Decoder};
use method::Method;
use status::StatusCode;
use version::HttpVersion::{Http10, Http11};
const MAX_HEADERS: usize = 100;
const AVERAGE_HEADER_SIZE: usize = 30; // totally scientific
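/// Parses a message head from `buf`. Returns `Ok(None)` when the buffer does
/// not yet hold a complete head, or `Ok(Some((head, len)))` where `len` is
/// the number of bytes the head consumed.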
pub fn parse<T: Http1Message<Incoming=I>, I>(buf: &[u8]) -> ParseResult<I> {
if buf.len() == 0 {
return Ok(None);
}
trace!("parse({:?})", buf);
<T as Http1Message>::parse(buf)
}
impl Http1Message for ServerMessage {
type Incoming = RequestLine;
type Outgoing = StatusCode;
fn initial_interest() -> Next {
Next::new(Next_::Read)
}
fn parse(buf: &[u8]) -> ParseResult<RequestLine> {
let mut headers = [httparse::EMPTY_HEADER; MAX_HEADERS];
trace!("Request.parse([Header; {}], [u8; {}])", headers.len(), buf.len());
let mut req = httparse::Request::new(&mut headers);
Ok(match try!(req.parse(buf)) {
httparse::Status::Complete(len) => {
trace!("Request.parse Complete({})", len);
Some((MessageHead {
version: if req.version.unwrap() == 1 { Http11 } else { Http10 },
subject: RequestLine(
try!(req.method.unwrap().parse()),
try!(req.path.unwrap().parse())
),
headers: try!(Headers::from_raw(req.headers))
}, len))
},
httparse::Status::Partial => None
})
}
fn decoder(head: &MessageHead<Self::Incoming>) -> ::Result<Decoder> {
use ::header;
if let Some(&header::ContentLength(len)) = head.headers.get() {
Ok(Decoder::length(len))
} else if head.headers.has::<header::TransferEncoding>() {
//TODO: check for Transfer-Encoding: chunked
Ok(Decoder::chunked())
} else {
Ok(Decoder::length(0))
}
}
fn encode(mut head: MessageHead<Self::Outgoing>, dst: &mut Vec<u8>) -> Encoder {
use ::header;
trace!("writing head: {:?}", head);
if !head.headers.has::<header::Date>() {
head.headers.set(header::Date(header::HttpDate(::time::now_utc())));
}
let mut is_chunked = true;
let mut body = Encoder::chunked();
if let Some(cl) = head.headers.get::<header::ContentLength>() {
body = Encoder::length(**cl);
is_chunked = false
}
if is_chunked {
let encodings = match head.headers.get_mut::<header::TransferEncoding>() {
Some(&mut header::TransferEncoding(ref mut encodings)) => {
if encodings.last() != Some(&header::Encoding::Chunked) {
encodings.push(header::Encoding::Chunked);
}
false
},
None => true
};
if encodings {
head.headers.set(header::TransferEncoding(vec![header::Encoding::Chunked]));
}
}
let init_cap = 30 + head.headers.len() * AVERAGE_HEADER_SIZE;
dst.reserve(init_cap);
debug!("writing {:#?}", head.headers);
let _ = write!(dst, "{} {}\r\n{}\r\n", head.version, head.subject, head.headers);
body
}
}
impl Http1Message for ClientMessage {
type Incoming = RawStatus;
type Outgoing = RequestLine;
fn initial_interest() -> Next {
Next::new(Next_::Write)
}
fn parse(buf: &[u8]) -> ParseResult<RawStatus> {
let mut headers = [httparse::EMPTY_HEADER; MAX_HEADERS];
trace!("Response.parse([Header; {}], [u8; {}])", headers.len(), buf.len());
let mut res = httparse::Response::new(&mut headers);
Ok(match try!(res.parse(buf)) {
httparse::Status::Complete(len) => {
trace!("Response.try_parse Complete({})", len);
let code = res.code.unwrap();
let reason = match StatusCode::from_u16(code).canonical_reason() {
Some(reason) if reason == res.reason.unwrap() => Cow::Borrowed(reason),
_ => Cow::Owned(res.reason.unwrap().to_owned())
};
Some((MessageHead {
version: if res.version.unwrap() == 1 { Http11 } else { Http10 },
subject: RawStatus(code, reason),
headers: try!(Headers::from_raw(res.headers))
}, len))
},
httparse::Status::Partial => None
})
}
fn decoder(inc: &MessageHead<Self::Incoming>) -> ::Result<Decoder> {
use ::header;
// According to https://tools.ietf.org/html/rfc7230#section-3.3.3
// 1. HEAD responses, and Status 1xx, 204, and 304 cannot have a body.
// 2. Status 2xx to a CONNECT cannot have a body.
//
// First two steps taken care of before this method.
//
// 3. Transfer-Encoding: chunked has a chunked body.
// 4. If multiple differing Content-Length headers or invalid, close connection.
// 5. Content-Length header has a sized body.
// 6. Not Client.
// 7. Read till EOF.
if let Some(&header::TransferEncoding(ref codings)) = inc.headers.get() {
if codings.last() == Some(&header::Encoding::Chunked) {
Ok(Decoder::chunked())
} else {
trace!("not chuncked. read till eof");
Ok(Decoder::eof())
}
} else if let Some(&header::ContentLength(len)) = inc.headers.get() {
Ok(Decoder::length(len))
} else if inc.headers.has::<header::ContentLength>() {
trace!("illegal Content-Length: {:?}", inc.headers.get_raw("Content-Length"));
Err(::Error::Header)
} else {
trace!("neither Transfer-Encoding nor Content-Length");
Ok(Decoder::eof())
}
}
fn encode(mut head: MessageHead<Self::Outgoing>, dst: &mut Vec<u8>) -> Encoder {
trace!("writing head: {:?}", head);
let mut body = Encoder::length(0);
let expects_no_body = match head.subject.0 {
Method::Head | Method::Get | Method::Connect => true,
_ => false
};
let mut chunked = false;
if let Some(con_len) = head.headers.get::<ContentLength>() {
body = Encoder::length(**con_len);
} else {
chunked = !expects_no_body;
}
if chunked {
body = Encoder::chunked();
let encodings = match head.headers.get_mut::<TransferEncoding>() {
Some(encodings) => {
//TODO: check if Chunked already exists
encodings.push(header::Encoding::Chunked);
true
},
None => false
};
if !encodings {
head.headers.set(TransferEncoding(vec![header::Encoding::Chunked]));
}
}
let init_cap = 30 + head.headers.len() * AVERAGE_HEADER_SIZE;
dst.reserve(init_cap);
debug!("writing {:#?}", head.headers);
let _ = write!(dst, "{} {}\r\n{}\r\n", head.subject, head.version, head.headers);
body
}
}
#[cfg(test)]
mod tests {
use http;
use super::{parse};
#[test]
fn test_parse_request() {
let raw = b"GET /echo HTTP/1.1\r\nHost: hyper.rs\r\n\r\n";
parse::<http::ServerMessage, _>(raw).unwrap();
}
#[test]
fn test_parse_raw_status() {
let raw = b"HTTP/1.1 200 OK\r\n\r\n";
let (res, _) = parse::<http::ClientMessage, _>(raw).unwrap().unwrap();
assert_eq!(res.subject.1, "OK");
let raw = b"HTTP/1.1 200 Howdy\r\n\r\n";
let (res, _) = parse::<http::ClientMessage, _>(raw).unwrap().unwrap();
assert_eq!(res.subject.1, "Howdy");
}
#[cfg(feature = "nightly")]
use test::Bencher;
#[cfg(feature = "nightly")]
#[bench]
fn bench_parse_incoming(b: &mut Bencher) {
let raw = b"GET /echo HTTP/1.1\r\nHost: hyper.rs\r\n\r\n";
b.iter(|| {
parse::<http::ServerMessage, _>(raw).unwrap()
});
}
}
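
For reference, `ServerMessage::encode` above serializes the head as `{version} {subject}\r\n{headers}\r\n`, so a chunked response head written into `dst` looks roughly like this (Date value hypothetical):

HTTP/1.1 200 OK\r\n
Date: Wed, 04 May 2016 03:45:43 GMT\r\n
Transfer-Encoding: chunked\r\n
\r\n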