feat(lib): switch to non-blocking (asynchronous) IO
BREAKING CHANGE: This breaks a lot of the Client and Server APIs. Check the documentation for how Handlers can be used for asynchronous events.
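For a feel of the new model, here is a rough, hypothetical sketch of a server-side handler written against the `MessageHandler` trait and the `Next` constructors added in this commit (imports elided; the user-facing `Handler` API described in the documentation may differ, and `HelloWorld` and its empty body handling are illustrative only):

struct HelloWorld;

impl<T: Transport> MessageHandler<T> for HelloWorld {
    type Message = http::ServerMessage;

    fn on_incoming(&mut self, _head: http::MessageHead<http::RequestLine>) -> Next {
        // no interest in the request body; move straight to writing
        Next::write()
    }

    fn on_decode(&mut self, _decoder: &mut http::Decoder<T>) -> Next {
        // only called if a read interest had been returned
        Next::write()
    }

    fn on_outgoing(&mut self, head: &mut http::MessageHead<StatusCode>) -> Next {
        head.subject = StatusCode::Ok;
        // keep the write interest so on_encode runs once the socket is writable
        Next::write()
    }

    fn on_encode(&mut self, _encoder: &mut http::Encoder<T>) -> Next {
        // the response body would be written through the Encoder here
        Next::end()
    }

    fn on_error(&mut self, _err: ::Error) -> Next {
        Next::remove()
    }

    fn on_remove(self, _transport: T) {}
}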
src/http/buffer.rs (new file, 120 lines)
@@ -0,0 +1,120 @@
use std::cmp;
use std::io::{self, Read};
use std::ptr;

const INIT_BUFFER_SIZE: usize = 4096;
const MAX_BUFFER_SIZE: usize = 8192 + 4096 * 100;

#[derive(Debug)]
pub struct Buffer {
    vec: Vec<u8>,
    read_pos: usize,
    write_pos: usize,
}

impl Buffer {
    pub fn new() -> Buffer {
        Buffer {
            vec: Vec::new(),
            read_pos: 0,
            write_pos: 0,
        }
    }

    pub fn reset(&mut self) {
        *self = Buffer::new()
    }

    #[inline]
    pub fn len(&self) -> usize {
        self.read_pos - self.write_pos
    }

    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    #[inline]
    pub fn bytes(&self) -> &[u8] {
        &self.vec[self.write_pos..self.read_pos]
    }

    #[inline]
    pub fn consume(&mut self, pos: usize) {
        debug_assert!(self.read_pos >= self.write_pos + pos);
        self.write_pos += pos;
        if self.write_pos == self.read_pos {
            self.write_pos = 0;
            self.read_pos = 0;
        }
    }

    pub fn read_from<R: Read>(&mut self, r: &mut R) -> io::Result<usize> {
        self.maybe_reserve();
        let n = try!(r.read(&mut self.vec[self.read_pos..]));
        self.read_pos += n;
        Ok(n)
    }

    #[inline]
    fn maybe_reserve(&mut self) {
        let cap = self.vec.len();
        if cap == 0 {
            trace!("reserving initial {}", INIT_BUFFER_SIZE);
            self.vec = vec![0; INIT_BUFFER_SIZE];
        } else if self.write_pos > 0 && self.read_pos == cap {
            let count = self.read_pos - self.write_pos;
            trace!("moving buffer bytes over by {}", count);
            unsafe {
                ptr::copy(
                    self.vec.as_ptr().offset(self.write_pos as isize),
                    self.vec.as_mut_ptr(),
                    count
                );
            }
            self.read_pos -= count;
            self.write_pos = 0;
        } else if self.read_pos == cap && cap < MAX_BUFFER_SIZE {
            self.vec.reserve(cmp::min(cap * 4, MAX_BUFFER_SIZE) - cap);
            let new = self.vec.capacity() - cap;
            trace!("reserved {}", new);
            unsafe { grow_zerofill(&mut self.vec, new) }
        }
    }

    pub fn wrap<'a, 'b: 'a, R: io::Read>(&'a mut self, reader: &'b mut R) -> BufReader<'a, R> {
        BufReader {
            buf: self,
            reader: reader
        }
    }
}

#[derive(Debug)]
pub struct BufReader<'a, R: io::Read + 'a> {
    buf: &'a mut Buffer,
    reader: &'a mut R
}

impl<'a, R: io::Read> Read for BufReader<'a, R> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        trace!("BufReader.read self={}, buf={}", self.buf.len(), buf.len());
        let n = try!(self.buf.bytes().read(buf));
        self.buf.consume(n);
        if n == 0 {
            self.buf.reset();
            self.reader.read(&mut buf[n..])
        } else {
            Ok(n)
        }
    }
}
#[inline]
unsafe fn grow_zerofill(buf: &mut Vec<u8>, additional: usize) {
    let len = buf.len();
    buf.set_len(len + additional);
    ptr::write_bytes(buf.as_mut_ptr().offset(len as isize), 0, additional);
}
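A hedged usage sketch (not part of the commit) of how the connection code later in this diff drives this Buffer; the parse step is elided and `parsed` stands in for whatever length a real parser would report:

fn sketch_fill_parse_drain<R: Read>(transport: &mut R) -> io::Result<()> {
    let mut buf = Buffer::new();

    // pull whatever the transport has right now; on a non-blocking socket
    // this surfaces WouldBlock through the io::Result
    let n = try!(buf.read_from(transport));
    trace!("read {} bytes, {} now buffered", n, buf.len());

    // a parser would inspect buf.bytes() here and report how much it consumed
    let parsed = 0;
    buf.consume(parsed);

    // leftover buffered bytes are served before the transport is read again
    let mut body = [0u8; 64];
    let m = try!(buf.wrap(transport).read(&mut body));
    trace!("{} body bytes available", m);
    Ok(())
}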
src/http/channel.rs (new file, 96 lines)
@@ -0,0 +1,96 @@
use std::fmt;
use std::sync::{Arc, mpsc};
use std::sync::atomic::{AtomicBool, Ordering};
use ::rotor;

pub use std::sync::mpsc::TryRecvError;

pub fn new<T>(notify: rotor::Notifier) -> (Sender<T>, Receiver<T>) {
    let b = Arc::new(AtomicBool::new(false));
    let (tx, rx) = mpsc::channel();
    (Sender {
        awake: b.clone(),
        notify: notify,
        tx: tx,
    },
    Receiver {
        awake: b,
        rx: rx,
    })
}

pub fn share<T, U>(other: &Sender<U>) -> (Sender<T>, Receiver<T>) {
    let (tx, rx) = mpsc::channel();
    let notify = other.notify.clone();
    let b = other.awake.clone();
    (Sender {
        awake: b.clone(),
        notify: notify,
        tx: tx,
    },
    Receiver {
        awake: b,
        rx: rx,
    })
}

pub struct Sender<T> {
    awake: Arc<AtomicBool>,
    notify: rotor::Notifier,
    tx: mpsc::Sender<T>,
}

impl<T: Send> Sender<T> {
    pub fn send(&self, val: T) -> Result<(), SendError<T>> {
        try!(self.tx.send(val));
        if !self.awake.swap(true, Ordering::SeqCst) {
            try!(self.notify.wakeup());
        }
        Ok(())
    }
}

impl<T> Clone for Sender<T> {
    fn clone(&self) -> Sender<T> {
        Sender {
            awake: self.awake.clone(),
            notify: self.notify.clone(),
            tx: self.tx.clone(),
        }
    }
}

impl<T> fmt::Debug for Sender<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Sender")
            .field("notify", &self.notify)
            .finish()
    }
}

#[derive(Debug)]
pub struct SendError<T>(pub Option<T>);

impl<T> From<mpsc::SendError<T>> for SendError<T> {
    fn from(e: mpsc::SendError<T>) -> SendError<T> {
        SendError(Some(e.0))
    }
}

impl<T> From<rotor::WakeupError> for SendError<T> {
    fn from(_e: rotor::WakeupError) -> SendError<T> {
        SendError(None)
    }
}

pub struct Receiver<T> {
    awake: Arc<AtomicBool>,
    rx: mpsc::Receiver<T>,
}

impl<T: Send> Receiver<T> {
    pub fn try_recv(&self) -> Result<T, mpsc::TryRecvError> {
        self.awake.store(false, Ordering::Relaxed);
        self.rx.try_recv()
    }
}
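A hedged sketch (not part of the commit) of the send/drain pattern this wakeup channel is built for; `Next` is just an example payload, any `T: Send` works:

fn queue(tx: &Sender<::http::Next>) {
    // send() queues the value and calls notify.wakeup() only if the
    // receiver has not already been flagged awake
    let _ = tx.send(::http::Next::read());
}

fn drain(rx: &Receiver<::http::Next>) -> Vec<::http::Next> {
    let mut woken = Vec::new();
    // try_recv() clears the awake flag before reading, so a send racing
    // with this drain still triggers a fresh wakeup
    while let Ok(next) = rx.try_recv() {
        woken.push(next);
    }
    woken
}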
src/http/conn.rs (new file, 915 lines)
@@ -0,0 +1,915 @@
use std::borrow::Cow;
use std::fmt;
use std::hash::Hash;
use std::io;
use std::marker::PhantomData;
use std::mem;
use std::time::Duration;

use rotor::{self, EventSet, PollOpt, Scope};

use http::{self, h1, Http1Message, Encoder, Decoder, Next, Next_, Reg, Control};
use http::channel;
use http::internal::WriteBuf;
use http::buffer::Buffer;
use net::{Transport, Blocked};
use version::HttpVersion;

const MAX_BUFFER_SIZE: usize = 8192 + 4096 * 100;

/// This handles a connection, which will have been established over a
/// Transport (like a socket), and will likely include multiple
/// `Message`s over HTTP.
///
/// The connection will determine when a message begins and ends, creating
/// a new message `MessageHandler` for each one, as well as determine if this
/// connection can be kept alive after the message, or if it is complete.
pub struct Conn<K: Key, T: Transport, H: MessageHandler<T>> {
    buf: Buffer,
    ctrl: (channel::Sender<Next>, channel::Receiver<Next>),
    keep_alive_enabled: bool,
    key: K,
    state: State<H, T>,
    transport: T,
}

impl<K: Key, T: Transport, H: MessageHandler<T>> fmt::Debug for Conn<K, T, H> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Conn")
            .field("keep_alive_enabled", &self.keep_alive_enabled)
            .field("state", &self.state)
            .field("buf", &self.buf)
            .finish()
    }
}

impl<K: Key, T: Transport, H: MessageHandler<T>> Conn<K, T, H> {
    pub fn new(key: K, transport: T, notify: rotor::Notifier) -> Conn<K, T, H> {
        Conn {
            buf: Buffer::new(),
            ctrl: channel::new(notify),
            keep_alive_enabled: true,
            key: key,
            state: State::Init,
            transport: transport,
        }
    }

    pub fn keep_alive(mut self, val: bool) -> Conn<K, T, H> {
        self.keep_alive_enabled = val;
        self
    }

    /// Desired register interest based on the state of the current connection.
    ///
    /// This includes the user interest, such as when they return `Next::read()`.
    fn interest(&self) -> Reg {
        match self.state {
            State::Closed => Reg::Remove,
            State::Init => {
                <<H as MessageHandler<T>>::Message as Http1Message>::initial_interest().interest()
            }
            State::Http1(Http1 { reading: Reading::Closed, writing: Writing::Closed, .. }) => {
                Reg::Remove
            }
            State::Http1(Http1 { ref reading, ref writing, .. }) => {
                let read = match *reading {
                    Reading::Parse |
                    Reading::Body(..) => Reg::Read,
                    Reading::Init |
                    Reading::Wait(..) |
                    Reading::KeepAlive |
                    Reading::Closed => Reg::Wait
                };

                let write = match *writing {
                    Writing::Head |
                    Writing::Chunk(..) |
                    Writing::Ready(..) => Reg::Write,
                    Writing::Init |
                    Writing::Wait(..) |
                    Writing::KeepAlive => Reg::Wait,
                    Writing::Closed => Reg::Wait,
                };

                match (read, write) {
                    (Reg::Read, Reg::Write) => Reg::ReadWrite,
                    (Reg::Read, Reg::Wait) => Reg::Read,
                    (Reg::Wait, Reg::Write) => Reg::Write,
                    (Reg::Wait, Reg::Wait) => Reg::Wait,
                    _ => unreachable!("bad read/write reg combo")
                }
            }
        }
    }

    /// Actual register action.
    ///
    /// Considers the user interest(), but also checks whether the underlying
    /// transport is blocked(), and adjusts accordingly.
    fn register(&self) -> Reg {
        let reg = self.interest();
        match (reg, self.transport.blocked()) {
            (Reg::Remove, _) |
            (Reg::Wait, _) |
            (_, None) => reg,

            (_, Some(Blocked::Read)) => Reg::Read,
            (_, Some(Blocked::Write)) => Reg::Write,
        }
    }

    fn parse(&mut self) -> ::Result<http::MessageHead<<<H as MessageHandler<T>>::Message as Http1Message>::Incoming>> {
        let n = try!(self.buf.read_from(&mut self.transport));
        if n == 0 {
            trace!("parse eof");
            return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "parse eof").into());
        }
        match try!(http::parse::<<H as MessageHandler<T>>::Message, _>(self.buf.bytes())) {
            Some((head, len)) => {
                trace!("parsed {} bytes out of {}", len, self.buf.len());
                self.buf.consume(len);
                Ok(head)
            },
            None => {
                if self.buf.len() >= MAX_BUFFER_SIZE {
                    //TODO: Handler.on_too_large_error()
                    debug!("MAX_BUFFER_SIZE reached, closing");
                    Err(::Error::TooLarge)
                } else {
                    Err(io::Error::new(io::ErrorKind::WouldBlock, "incomplete parse").into())
                }
            },
        }
    }
    fn read<F: MessageHandlerFactory<K, T, Output=H>>(&mut self, scope: &mut Scope<F>, state: State<H, T>) -> State<H, T> {
        match state {
            State::Init => {
                let head = match self.parse() {
                    Ok(head) => head,
                    Err(::Error::Io(e)) => match e.kind() {
                        io::ErrorKind::WouldBlock |
                        io::ErrorKind::Interrupted => return State::Init,
                        _ => {
                            debug!("io error trying to parse {:?}", e);
                            return State::Closed;
                        }
                    },
                    Err(e) => {
                        //TODO: send proper error codes depending on error
                        trace!("parse error: {:?}", e);
                        return State::Closed;
                    }
                };
                match <<H as MessageHandler<T>>::Message as Http1Message>::decoder(&head) {
                    Ok(decoder) => {
                        trace!("decoder = {:?}", decoder);
                        let keep_alive = self.keep_alive_enabled && head.should_keep_alive();
                        let mut handler = scope.create(Seed(&self.key, &self.ctrl.0));
                        let next = handler.on_incoming(head);
                        trace!("handler.on_incoming() -> {:?}", next);

                        match next.interest {
                            Next_::Read => self.read(scope, State::Http1(Http1 {
                                handler: handler,
                                reading: Reading::Body(decoder),
                                writing: Writing::Init,
                                keep_alive: keep_alive,
                                timeout: next.timeout,
                                _marker: PhantomData,
                            })),
                            Next_::Write => State::Http1(Http1 {
                                handler: handler,
                                reading: if decoder.is_eof() {
                                    if keep_alive {
                                        Reading::KeepAlive
                                    } else {
                                        Reading::Closed
                                    }
                                } else {
                                    Reading::Wait(decoder)
                                },
                                writing: Writing::Head,
                                keep_alive: keep_alive,
                                timeout: next.timeout,
                                _marker: PhantomData,
                            }),
                            Next_::ReadWrite => self.read(scope, State::Http1(Http1 {
                                handler: handler,
                                reading: Reading::Body(decoder),
                                writing: Writing::Head,
                                keep_alive: keep_alive,
                                timeout: next.timeout,
                                _marker: PhantomData,
                            })),
                            Next_::Wait => State::Http1(Http1 {
                                handler: handler,
                                reading: Reading::Wait(decoder),
                                writing: Writing::Init,
                                keep_alive: keep_alive,
                                timeout: next.timeout,
                                _marker: PhantomData,
                            }),
                            Next_::End |
                            Next_::Remove => State::Closed,
                        }
                    },
                    Err(e) => {
                        debug!("error creating decoder: {:?}", e);
                        //TODO: respond with 400
                        State::Closed
                    }
                }
            },
            State::Http1(mut http1) => {
                let next = match http1.reading {
                    Reading::Init => None,
                    Reading::Parse => match self.parse() {
                        Ok(head) => match <<H as MessageHandler<T>>::Message as Http1Message>::decoder(&head) {
                            Ok(decoder) => {
                                trace!("decoder = {:?}", decoder);
                                // if client request asked for keep alive,
                                // then it depends entirely on if the server agreed
                                if http1.keep_alive {
                                    http1.keep_alive = head.should_keep_alive();
                                }
                                let next = http1.handler.on_incoming(head);
                                http1.reading = Reading::Wait(decoder);
                                trace!("handler.on_incoming() -> {:?}", next);
                                Some(next)
                            },
                            Err(e) => {
                                debug!("error creating decoder: {:?}", e);
                                //TODO: respond with 400
                                return State::Closed;
                            }
                        },
                        Err(::Error::Io(e)) => match e.kind() {
                            io::ErrorKind::WouldBlock |
                            io::ErrorKind::Interrupted => None,
                            _ => {
                                debug!("io error trying to parse {:?}", e);
                                return State::Closed;
                            }
                        },
                        Err(e) => {
                            //TODO: send proper error codes depending on error
                            trace!("parse error: {:?}", e);
                            return State::Closed;
                        }
                    },
                    Reading::Body(ref mut decoder) => {
                        let wrapped = if !self.buf.is_empty() {
                            super::Trans::Buf(self.buf.wrap(&mut self.transport))
                        } else {
                            super::Trans::Port(&mut self.transport)
                        };

                        Some(http1.handler.on_decode(&mut Decoder::h1(decoder, wrapped)))
                    },
                    _ => {
                        trace!("Conn.on_readable State::Http1(reading = {:?})", http1.reading);
                        None
                    }
                };
                let mut s = State::Http1(http1);
                trace!("h1 read completed, next = {:?}", next);
                if let Some(next) = next {
                    s.update(next);
                }
                trace!("h1 read completed, state = {:?}", s);

                let again = match s {
                    State::Http1(Http1 { reading: Reading::Body(ref decoder), .. }) => decoder.is_eof(),
                    _ => false
                };

                if again {
                    self.read(scope, s)
                } else {
                    s
                }
            },
            State::Closed => {
                error!("on_readable State::Closed");
                State::Closed
            }
        }
    }
    fn write<F: MessageHandlerFactory<K, T, Output=H>>(&mut self, scope: &mut Scope<F>, mut state: State<H, T>) -> State<H, T> {
        let next = match state {
            State::Init => {
                // this could be a Client request, which writes first, so pay
                // attention to the version written here, which will adjust
                // our internal state to Http1 or Http2
                let mut handler = scope.create(Seed(&self.key, &self.ctrl.0));
                let mut head = http::MessageHead::default();
                let interest = handler.on_outgoing(&mut head);
                if head.version == HttpVersion::Http11 {
                    let mut buf = Vec::new();
                    let keep_alive = self.keep_alive_enabled && head.should_keep_alive();
                    let mut encoder = H::Message::encode(head, &mut buf);
                    let writing = match interest.interest {
                        // user wants to write some data right away
                        // try to write the headers and the first chunk
                        // together, so they are in the same packet
                        Next_::Write |
                        Next_::ReadWrite => {
                            encoder.prefix(WriteBuf {
                                bytes: buf,
                                pos: 0
                            });
                            Writing::Ready(encoder)
                        },
                        _ => Writing::Chunk(Chunk {
                            buf: Cow::Owned(buf),
                            pos: 0,
                            next: (encoder, interest.clone())
                        })
                    };
                    state = State::Http1(Http1 {
                        reading: Reading::Init,
                        writing: writing,
                        handler: handler,
                        keep_alive: keep_alive,
                        timeout: interest.timeout,
                        _marker: PhantomData,
                    })
                }
                Some(interest)
            }
            State::Http1(Http1 { ref mut handler, ref mut writing, ref mut keep_alive, .. }) => {
                match *writing {
                    Writing::Init => {
                        trace!("Conn.on_writable Http1::Writing::Init");
                        None
                    }
                    Writing::Head => {
                        let mut head = http::MessageHead::default();
                        let interest = handler.on_outgoing(&mut head);
                        // if the request wants to close, server cannot stop it
                        if *keep_alive {
                            // if the request wants to stay alive, then it depends
                            // on the server to agree
                            *keep_alive = head.should_keep_alive();
                        }
                        let mut buf = Vec::new();
                        let mut encoder = <<H as MessageHandler<T>>::Message as Http1Message>::encode(head, &mut buf);
                        *writing = match interest.interest {
                            // user wants to write some data right away
                            // try to write the headers and the first chunk
                            // together, so they are in the same packet
                            Next_::Write |
                            Next_::ReadWrite => {
                                encoder.prefix(WriteBuf {
                                    bytes: buf,
                                    pos: 0
                                });
                                Writing::Ready(encoder)
                            },
                            _ => Writing::Chunk(Chunk {
                                buf: Cow::Owned(buf),
                                pos: 0,
                                next: (encoder, interest.clone())
                            })
                        };
                        Some(interest)
                    },
                    Writing::Chunk(ref mut chunk) => {
                        trace!("Http1.Chunk on_writable");
                        match self.transport.write(&chunk.buf.as_ref()[chunk.pos..]) {
                            Ok(n) => {
                                chunk.pos += n;
                                trace!("Http1.Chunk wrote={}, done={}", n, chunk.is_written());
                                if chunk.is_written() {
                                    Some(chunk.next.1.clone())
                                } else {
                                    None
                                }
                            },
                            Err(e) => match e.kind() {
                                io::ErrorKind::WouldBlock |
                                io::ErrorKind::Interrupted => None,
                                _ => {
                                    Some(handler.on_error(e.into()))
                                }
                            }
                        }
                    },
                    Writing::Ready(ref mut encoder) => {
                        trace!("Http1.Ready on_writable");
                        Some(handler.on_encode(&mut Encoder::h1(encoder, &mut self.transport)))
                    },
                    Writing::Wait(..) => {
                        trace!("Conn.on_writable Http1::Writing::Wait");
                        None
                    }
                    Writing::KeepAlive => {
                        trace!("Conn.on_writable Http1::Writing::KeepAlive");
                        None
                    }
                    Writing::Closed => {
                        trace!("on_writable Http1::Writing::Closed");
                        None
                    }
                }
            },
            State::Closed => {
                trace!("on_writable State::Closed");
                None
            }
        };

        if let Some(next) = next {
            state.update(next);
        }
        state
    }
    fn can_read_more(&self) -> bool {
        match self.state {
            State::Init => false,
            _ => !self.buf.is_empty()
        }
    }

    pub fn ready<F>(mut self, events: EventSet, scope: &mut Scope<F>) -> Option<(Self, Option<Duration>)>
    where F: MessageHandlerFactory<K, T, Output=H> {
        trace!("Conn::ready events='{:?}', blocked={:?}", events, self.transport.blocked());

        if events.is_error() {
            match self.transport.take_socket_error() {
                Ok(_) => {
                    trace!("is_error, but not socket error");
                    // spurious?
                },
                Err(e) => self.on_error(e.into())
            }
        }

        // if the user had an io interest, but the transport was blocked differently,
        // the event needs to be translated to what the user was actually expecting.
        //
        // Example:
        // - User asks for `Next::write()`.
        // - But transport is in the middle of renegotiating TLS, and is blocked on reading.
        // - hyper should not wait on the `write` event, since epoll already
        //   knows it is writable. We would just loop a whole bunch, and slow down.
        // - So instead, hyper waits on the event needed to unblock the transport, `read`.
        // - Once epoll detects the transport is readable, it will alert hyper
        //   with a `readable` event.
        // - hyper needs to translate that `readable` event back into a `write`,
        //   since that is actually what the Handler wants.

        let events = if let Some(blocked) = self.transport.blocked() {
            let interest = self.interest();
            trace!("translating blocked={:?}, interest={:?}", blocked, interest);
            match (blocked, interest) {
                (Blocked::Read, Reg::Write) => EventSet::writable(),
                (Blocked::Write, Reg::Read) => EventSet::readable(),
                // otherwise, the transport was blocked on the same thing the user wanted
                _ => events
            }
        } else {
            events
        };

        if events.is_readable() {
            self.on_readable(scope);
        }

        if events.is_writable() {
            self.on_writable(scope);
        }

        let events = match self.register() {
            Reg::Read => EventSet::readable(),
            Reg::Write => EventSet::writable(),
            Reg::ReadWrite => EventSet::readable() | EventSet::writable(),
            Reg::Wait => EventSet::none(),
            Reg::Remove => {
                trace!("removing transport");
                let _ = scope.deregister(&self.transport);
                self.on_remove();
                return None;
            },
        };

        if events.is_readable() && self.can_read_more() {
            return self.ready(events, scope);
        }

        trace!("scope.reregister({:?})", events);
        match scope.reregister(&self.transport, events, PollOpt::level()) {
            Ok(..) => {
                let timeout = self.state.timeout();
                Some((self, timeout))
            },
            Err(e) => {
                error!("error reregistering: {:?}", e);
                None
            }
        }
    }

    pub fn wakeup<F>(mut self, scope: &mut Scope<F>) -> Option<(Self, Option<Duration>)>
    where F: MessageHandlerFactory<K, T, Output=H> {
        loop {
            match self.ctrl.1.try_recv() {
                Ok(next) => {
                    trace!("woke up with {:?}", next);
                    self.state.update(next);
                },
                Err(_) => break
            }
        }
        self.ready(EventSet::readable() | EventSet::writable(), scope)
    }

    pub fn timeout<F>(mut self, scope: &mut Scope<F>) -> Option<(Self, Option<Duration>)>
    where F: MessageHandlerFactory<K, T, Output=H> {
        //TODO: check if this was a spurious timeout?
        self.on_error(::Error::Timeout);
        self.ready(EventSet::none(), scope)
    }

    fn on_error(&mut self, err: ::Error) {
        debug!("on_error err = {:?}", err);
        trace!("on_error state = {:?}", self.state);
        let next = match self.state {
            State::Init => Next::remove(),
            State::Http1(ref mut http1) => http1.handler.on_error(err),
            State::Closed => Next::remove(),
        };
        self.state.update(next);
    }

    fn on_remove(self) {
        debug!("on_remove");
        match self.state {
            State::Init | State::Closed => (),
            State::Http1(http1) => http1.handler.on_remove(self.transport),
        }
    }

    fn on_readable<F>(&mut self, scope: &mut Scope<F>)
    where F: MessageHandlerFactory<K, T, Output=H> {
        trace!("on_readable -> {:?}", self.state);
        let state = mem::replace(&mut self.state, State::Closed);
        self.state = self.read(scope, state);
        trace!("on_readable <- {:?}", self.state);
    }

    fn on_writable<F>(&mut self, scope: &mut Scope<F>)
    where F: MessageHandlerFactory<K, T, Output=H> {
        trace!("on_writable -> {:?}", self.state);
        let state = mem::replace(&mut self.state, State::Closed);
        self.state = self.write(scope, state);
        trace!("on_writable <- {:?}", self.state);
    }
}
enum State<H: MessageHandler<T>, T: Transport> {
    Init,
    /// Http1 will only ever use a connection to send and receive a single
    /// message at a time. Once a H1 status has been determined, we will either
    /// be reading or writing an H1 message, and optionally multiple if
    /// keep-alive is true.
    Http1(Http1<H, T>),
    /// Http2 allows multiplexing streams over a single connection. So even
    /// when we've identified a certain message, we must always parse frame
    /// head to determine if the incoming frame is part of a current message,
    /// or a new one. This also means we could have multiple messages at once.
    //Http2 {},
    Closed,
}


impl<H: MessageHandler<T>, T: Transport> State<H, T> {
    fn timeout(&self) -> Option<Duration> {
        match *self {
            State::Init => None,
            State::Http1(ref http1) => http1.timeout,
            State::Closed => None,
        }
    }
}

impl<H: MessageHandler<T>, T: Transport> fmt::Debug for State<H, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            State::Init => f.write_str("Init"),
            State::Http1(ref h1) => f.debug_tuple("Http1")
                .field(h1)
                .finish(),
            State::Closed => f.write_str("Closed")
        }
    }
}

impl<H: MessageHandler<T>, T: Transport> State<H, T> {
    fn update(&mut self, next: Next) {
        let timeout = next.timeout;
        let state = mem::replace(self, State::Closed);
        let new_state = match (state, next.interest) {
            (_, Next_::Remove) => State::Closed,
            (State::Closed, _) => State::Closed,
            (State::Init, _) => State::Init,
            (State::Http1(http1), Next_::End) => {
                let reading = match http1.reading {
                    Reading::Body(ref decoder) if decoder.is_eof() => {
                        if http1.keep_alive {
                            Reading::KeepAlive
                        } else {
                            Reading::Closed
                        }
                    },
                    Reading::KeepAlive => http1.reading,
                    _ => Reading::Closed,
                };
                let writing = match http1.writing {
                    Writing::Ready(ref encoder) if encoder.is_eof() => {
                        if http1.keep_alive {
                            Writing::KeepAlive
                        } else {
                            Writing::Closed
                        }
                    },
                    Writing::Ready(encoder) => {
                        if encoder.is_eof() {
                            if http1.keep_alive {
                                Writing::KeepAlive
                            } else {
                                Writing::Closed
                            }
                        } else if let Some(buf) = encoder.end() {
                            Writing::Chunk(Chunk {
                                buf: buf.bytes,
                                pos: buf.pos,
                                next: (h1::Encoder::length(0), Next::end())
                            })
                        } else {
                            Writing::Closed
                        }
                    }
                    Writing::Chunk(mut chunk) => {
                        if chunk.is_written() {
                            let encoder = chunk.next.0;
                            //TODO: de-dupe this code and from Writing::Ready
                            if encoder.is_eof() {
                                if http1.keep_alive {
                                    Writing::KeepAlive
                                } else {
                                    Writing::Closed
                                }
                            } else if let Some(buf) = encoder.end() {
                                Writing::Chunk(Chunk {
                                    buf: buf.bytes,
                                    pos: buf.pos,
                                    next: (h1::Encoder::length(0), Next::end())
                                })
                            } else {
                                Writing::Closed
                            }
                        } else {
                            chunk.next.1 = next;
                            Writing::Chunk(chunk)
                        }
                    },
                    _ => Writing::Closed,
                };
                match (reading, writing) {
                    (Reading::KeepAlive, Writing::KeepAlive) => State::Init,
                    (reading, Writing::Chunk(chunk)) => {
                        State::Http1(Http1 {
                            reading: reading,
                            writing: Writing::Chunk(chunk),
                            .. http1
                        })
                    }
                    _ => State::Closed
                }
            },
            (State::Http1(mut http1), Next_::Read) => {
                http1.reading = match http1.reading {
                    Reading::Init => Reading::Parse,
                    Reading::Wait(decoder) => Reading::Body(decoder),
                    same => same
                };

                http1.writing = match http1.writing {
                    Writing::Ready(encoder) => if encoder.is_eof() {
                        if http1.keep_alive {
                            Writing::KeepAlive
                        } else {
                            Writing::Closed
                        }
                    } else {
                        Writing::Wait(encoder)
                    },
                    Writing::Chunk(chunk) => if chunk.is_written() {
                        Writing::Wait(chunk.next.0)
                    } else {
                        Writing::Chunk(chunk)
                    },
                    same => same
                };

                State::Http1(http1)
            },
            (State::Http1(mut http1), Next_::Write) => {
                http1.writing = match http1.writing {
                    Writing::Wait(encoder) => Writing::Ready(encoder),
                    Writing::Init => Writing::Head,
                    Writing::Chunk(chunk) => if chunk.is_written() {
                        Writing::Ready(chunk.next.0)
                    } else {
                        Writing::Chunk(chunk)
                    },
                    same => same
                };

                http1.reading = match http1.reading {
                    Reading::Body(decoder) => if decoder.is_eof() {
                        if http1.keep_alive {
                            Reading::KeepAlive
                        } else {
                            Reading::Closed
                        }
                    } else {
                        Reading::Wait(decoder)
                    },
                    same => same
                };
                State::Http1(http1)
            },
            (State::Http1(mut http1), Next_::ReadWrite) => {
                http1.reading = match http1.reading {
                    Reading::Init => Reading::Parse,
                    Reading::Wait(decoder) => Reading::Body(decoder),
                    same => same
                };
                http1.writing = match http1.writing {
                    Writing::Wait(encoder) => Writing::Ready(encoder),
                    Writing::Init => Writing::Head,
                    Writing::Chunk(chunk) => if chunk.is_written() {
                        Writing::Ready(chunk.next.0)
                    } else {
                        Writing::Chunk(chunk)
                    },
                    same => same
                };
                State::Http1(http1)
            },
            (State::Http1(mut http1), Next_::Wait) => {
                http1.reading = match http1.reading {
                    Reading::Body(decoder) => Reading::Wait(decoder),
                    same => same
                };

                http1.writing = match http1.writing {
                    Writing::Ready(encoder) => Writing::Wait(encoder),
                    Writing::Chunk(chunk) => if chunk.is_written() {
                        Writing::Wait(chunk.next.0)
                    } else {
                        Writing::Chunk(chunk)
                    },
                    same => same
                };
                State::Http1(http1)
            }
        };
        let new_state = match new_state {
            State::Http1(mut http1) => {
                http1.timeout = timeout;
                State::Http1(http1)
            }
            other => other
        };
        mem::replace(self, new_state);
    }
}

// These Reading and Writing types should probably get moved into h1/message.rs
struct Http1<H, T> {
    handler: H,
    reading: Reading,
    writing: Writing,
    keep_alive: bool,
    timeout: Option<Duration>,
    _marker: PhantomData<T>,
}

impl<H, T> fmt::Debug for Http1<H, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Http1")
            .field("reading", &self.reading)
            .field("writing", &self.writing)
            .field("keep_alive", &self.keep_alive)
            .field("timeout", &self.timeout)
            .finish()
    }
}

#[derive(Debug)]
enum Reading {
    Init,
    Parse,
    Body(h1::Decoder),
    Wait(h1::Decoder),
    KeepAlive,
    Closed
}

#[derive(Debug)]
enum Writing {
    Init,
    Head,
    Chunk(Chunk),
    Ready(h1::Encoder),
    Wait(h1::Encoder),
    KeepAlive,
    Closed
}

#[derive(Debug)]
struct Chunk {
    buf: Cow<'static, [u8]>,
    pos: usize,
    next: (h1::Encoder, Next),
}

impl Chunk {
    fn is_written(&self) -> bool {
        self.pos >= self.buf.len()
    }
}

pub trait MessageHandler<T: Transport> {
    type Message: Http1Message;
    fn on_incoming(&mut self, head: http::MessageHead<<Self::Message as Http1Message>::Incoming>) -> Next;
    fn on_outgoing(&mut self, head: &mut http::MessageHead<<Self::Message as Http1Message>::Outgoing>) -> Next;
    fn on_decode(&mut self, &mut http::Decoder<T>) -> Next;
    fn on_encode(&mut self, &mut http::Encoder<T>) -> Next;
    fn on_error(&mut self, err: ::Error) -> Next;

    fn on_remove(self, T) where Self: Sized;
}

pub struct Seed<'a, K: Key + 'a>(&'a K, &'a channel::Sender<Next>);

impl<'a, K: Key + 'a> Seed<'a, K> {
    pub fn control(&self) -> Control {
        Control {
            tx: self.1.clone(),
        }
    }

    pub fn key(&self) -> &K {
        &self.0
    }
}


pub trait MessageHandlerFactory<K: Key, T: Transport> {
    type Output: MessageHandler<T>;

    fn create(&mut self, seed: Seed<K>) -> Self::Output;
}

impl<F, K, H, T> MessageHandlerFactory<K, T> for F
    where F: FnMut(Seed<K>) -> H,
          K: Key,
          H: MessageHandler<T>,
          T: Transport {
    type Output = H;
    fn create(&mut self, seed: Seed<K>) -> H {
        self(seed)
    }
}

pub trait Key: Eq + Hash + Clone {}
impl<T: Eq + Hash + Clone> Key for T {}

#[cfg(test)]
mod tests {
    /* TODO:
    test when the underlying Transport of a Conn is blocked on an action that
    differs from the desired interest().

    Ex:
        transport.blocked() == Some(Blocked::Read)
        self.interest() == Reg::Write

    Should call `scope.register(EventSet::read())`, not with write

    #[test]
    fn test_conn_register_when_transport_blocked() {

    }
    */
}
src/http/h1.rs (1137 lines changed)
File diff suppressed because it is too large
src/http/h1/decode.rs (new file, 293 lines)
@@ -0,0 +1,293 @@
use std::cmp;
use std::io::{self, Read};

use self::Kind::{Length, Chunked, Eof};

/// Decoders to handle different Transfer-Encodings.
///
/// If a message body does not include a Transfer-Encoding, it *should*
/// include a Content-Length header.
#[derive(Debug, Clone)]
pub struct Decoder {
    kind: Kind,
}

impl Decoder {
    pub fn length(x: u64) -> Decoder {
        Decoder {
            kind: Kind::Length(x)
        }
    }

    pub fn chunked() -> Decoder {
        Decoder {
            kind: Kind::Chunked(None)
        }
    }

    pub fn eof() -> Decoder {
        Decoder {
            kind: Kind::Eof(false)
        }
    }
}

#[derive(Debug, Clone)]
enum Kind {
    /// A Reader used when a Content-Length header is passed with a positive integer.
    Length(u64),
    /// A Reader used when Transfer-Encoding is `chunked`.
    Chunked(Option<u64>),
    /// A Reader used for responses that don't indicate a length or chunked.
    ///
    /// Note: This should only be used for `Response`s. It is illegal for a
    /// `Request` to be made with both `Content-Length` and
    /// `Transfer-Encoding: chunked` missing, as explained in the spec:
    ///
    /// > If a Transfer-Encoding header field is present in a response and
    /// > the chunked transfer coding is not the final encoding, the
    /// > message body length is determined by reading the connection until
    /// > it is closed by the server. If a Transfer-Encoding header field
    /// > is present in a request and the chunked transfer coding is not
    /// > the final encoding, the message body length cannot be determined
    /// > reliably; the server MUST respond with the 400 (Bad Request)
    /// > status code and then close the connection.
    Eof(bool),
}

impl Decoder {
    pub fn is_eof(&self) -> bool {
        trace!("is_eof? {:?}", self);
        match self.kind {
            Length(0) |
            Chunked(Some(0)) |
            Eof(true) => true,
            _ => false
        }
    }
}

impl Decoder {
    pub fn decode<R: Read>(&mut self, body: &mut R, buf: &mut [u8]) -> io::Result<usize> {
        match self.kind {
            Length(ref mut remaining) => {
                trace!("Sized read, remaining={:?}", remaining);
                if *remaining == 0 {
                    Ok(0)
                } else {
                    let to_read = cmp::min(*remaining as usize, buf.len());
                    let num = try!(body.read(&mut buf[..to_read])) as u64;
                    trace!("Length read: {}", num);
                    if num > *remaining {
                        *remaining = 0;
                    } else if num == 0 {
                        return Err(io::Error::new(io::ErrorKind::Other, "early eof"));
                    } else {
                        *remaining -= num;
                    }
                    Ok(num as usize)
                }
            },
            Chunked(ref mut opt_remaining) => {
                let mut rem = match *opt_remaining {
                    Some(ref rem) => *rem,
                    // None means we don't know the size of the next chunk
                    None => try!(read_chunk_size(body))
                };
                trace!("Chunked read, remaining={:?}", rem);

                if rem == 0 {
                    *opt_remaining = Some(0);

                    // chunk of size 0 signals the end of the chunked stream
                    // if the 0 digit was missing from the stream, it would
                    // be an InvalidInput error instead.
                    trace!("end of chunked");
                    return Ok(0)
                }

                let to_read = cmp::min(rem as usize, buf.len());
                let count = try!(body.read(&mut buf[..to_read])) as u64;

                if count == 0 {
                    *opt_remaining = Some(0);
                    return Err(io::Error::new(io::ErrorKind::Other, "early eof"));
                }

                rem -= count;
                *opt_remaining = if rem > 0 {
                    Some(rem)
                } else {
                    try!(eat(body, b"\r\n"));
                    None
                };
                Ok(count as usize)
            },
            Eof(ref mut is_eof) => {
                match body.read(buf) {
                    Ok(0) => {
                        *is_eof = true;
                        Ok(0)
                    }
                    other => other
                }
            },
        }
    }
}

fn eat<R: Read>(rdr: &mut R, bytes: &[u8]) -> io::Result<()> {
    let mut buf = [0];
    for &b in bytes.iter() {
        match try!(rdr.read(&mut buf)) {
            1 if buf[0] == b => (),
            _ => return Err(io::Error::new(io::ErrorKind::InvalidInput,
                                           "Invalid characters found")),
        }
    }
    Ok(())
}

/// Chunked chunks start with 1*HEXDIGIT, indicating the size of the chunk.
fn read_chunk_size<R: Read>(rdr: &mut R) -> io::Result<u64> {
    macro_rules! byte (
        ($rdr:ident) => ({
            let mut buf = [0];
            match try!($rdr.read(&mut buf)) {
                1 => buf[0],
                _ => return Err(io::Error::new(io::ErrorKind::InvalidInput,
                                               "Invalid chunk size line")),
            }
        })
    );
    let mut size = 0u64;
    let radix = 16;
    let mut in_ext = false;
    let mut in_chunk_size = true;
    loop {
        match byte!(rdr) {
            b@b'0'...b'9' if in_chunk_size => {
                size *= radix;
                size += (b - b'0') as u64;
            },
            b@b'a'...b'f' if in_chunk_size => {
                size *= radix;
                size += (b + 10 - b'a') as u64;
            },
            b@b'A'...b'F' if in_chunk_size => {
                size *= radix;
                size += (b + 10 - b'A') as u64;
            },
            b'\r' => {
                match byte!(rdr) {
                    b'\n' => break,
                    _ => return Err(io::Error::new(io::ErrorKind::InvalidInput,
                                                   "Invalid chunk size line"))
                }
            },
            // If we weren't in the extension yet, the ";" signals its start
            b';' if !in_ext => {
                in_ext = true;
                in_chunk_size = false;
            },
            // "Linear white space" is ignored between the chunk size and the
            // extension separator token (";") due to the "implied *LWS rule".
            b'\t' | b' ' if !in_ext & !in_chunk_size => {},
            // LWS can follow the chunk size, but no more digits can come
            b'\t' | b' ' if in_chunk_size => in_chunk_size = false,
            // We allow any arbitrary octet once we are in the extension, since
            // they all get ignored anyway. According to the HTTP spec, valid
            // extensions would have a more strict syntax:
            //     (token ["=" (token | quoted-string)])
            // but we gain nothing by rejecting an otherwise valid chunk size.
            _ext if in_ext => {
                //TODO: chunk extension byte;
            },
            // Finally, if we aren't in the extension and we're reading any
            // other octet, the chunk size line is invalid!
            _ => {
                return Err(io::Error::new(io::ErrorKind::InvalidInput,
                                          "Invalid chunk size line"));
            }
        }
    }
    trace!("chunk size={:?}", size);
    Ok(size)
}


#[cfg(test)]
mod tests {
    use std::error::Error;
    use std::io;
    use super::{Decoder, read_chunk_size};

    #[test]
    fn test_read_chunk_size() {
        fn read(s: &str, result: u64) {
            assert_eq!(read_chunk_size(&mut s.as_bytes()).unwrap(), result);
        }

        fn read_err(s: &str) {
            assert_eq!(read_chunk_size(&mut s.as_bytes()).unwrap_err().kind(),
                       io::ErrorKind::InvalidInput);
        }

        read("1\r\n", 1);
        read("01\r\n", 1);
        read("0\r\n", 0);
        read("00\r\n", 0);
        read("A\r\n", 10);
        read("a\r\n", 10);
        read("Ff\r\n", 255);
        read("Ff \r\n", 255);
        // Missing LF or CRLF
        read_err("F\rF");
        read_err("F");
        // Invalid hex digit
        read_err("X\r\n");
        read_err("1X\r\n");
        read_err("-\r\n");
        read_err("-1\r\n");
        // Acceptable (if not fully valid) extensions do not influence the size
        read("1;extension\r\n", 1);
        read("a;ext name=value\r\n", 10);
        read("1;extension;extension2\r\n", 1);
        read("1;;; ;\r\n", 1);
        read("2; extension...\r\n", 2);
        read("3 ; extension=123\r\n", 3);
        read("3 ;\r\n", 3);
        read("3 ; \r\n", 3);
        // Invalid extensions cause an error
        read_err("1 invalid extension\r\n");
        read_err("1 A\r\n");
        read_err("1;no CRLF");
    }

    #[test]
    fn test_read_sized_early_eof() {
        let mut bytes = &b"foo bar"[..];
        let mut decoder = Decoder::length(10);
        let mut buf = [0u8; 10];
        assert_eq!(decoder.decode(&mut bytes, &mut buf).unwrap(), 7);
        let e = decoder.decode(&mut bytes, &mut buf).unwrap_err();
        assert_eq!(e.kind(), io::ErrorKind::Other);
        assert_eq!(e.description(), "early eof");
    }

    #[test]
    fn test_read_chunked_early_eof() {
        let mut bytes = &b"\
            9\r\n\
            foo bar\
        "[..];
        let mut decoder = Decoder::chunked();
        let mut buf = [0u8; 10];
        assert_eq!(decoder.decode(&mut bytes, &mut buf).unwrap(), 7);
        let e = decoder.decode(&mut bytes, &mut buf).unwrap_err();
        assert_eq!(e.kind(), io::ErrorKind::Other);
        assert_eq!(e.description(), "early eof");
    }
}
src/http/h1/encode.rs (new file, 371 lines)
@@ -0,0 +1,371 @@
use std::borrow::Cow;
use std::cmp;
use std::io::{self, Write};

use http::internal::{AtomicWrite, WriteBuf};

/// Encoders to handle different Transfer-Encodings.
#[derive(Debug, Clone)]
pub struct Encoder {
    kind: Kind,
    prefix: Prefix, //Option<WriteBuf<Vec<u8>>>
}

#[derive(Debug, PartialEq, Clone)]
enum Kind {
    /// An Encoder for when Transfer-Encoding includes `chunked`.
    Chunked(Chunked),
    /// An Encoder for when Content-Length is set.
    ///
    /// Enforces that the body is not longer than the Content-Length header.
    Length(u64),
}

impl Encoder {
    pub fn chunked() -> Encoder {
        Encoder {
            kind: Kind::Chunked(Chunked::Init),
            prefix: Prefix(None)
        }
    }

    pub fn length(len: u64) -> Encoder {
        Encoder {
            kind: Kind::Length(len),
            prefix: Prefix(None)
        }
    }

    pub fn prefix(&mut self, prefix: WriteBuf<Vec<u8>>) {
        self.prefix.0 = Some(prefix);
    }

    pub fn is_eof(&self) -> bool {
        if self.prefix.0.is_some() {
            return false;
        }
        match self.kind {
            Kind::Length(0) |
            Kind::Chunked(Chunked::End) => true,
            _ => false
        }
    }

    pub fn end(self) -> Option<WriteBuf<Cow<'static, [u8]>>> {
        let trailer = self.trailer();
        let buf = self.prefix.0;

        match (buf, trailer) {
            (Some(mut buf), Some(trailer)) => {
                buf.bytes.extend_from_slice(trailer);
                Some(WriteBuf {
                    bytes: Cow::Owned(buf.bytes),
                    pos: buf.pos,
                })
            },
            (Some(buf), None) => Some(WriteBuf {
                bytes: Cow::Owned(buf.bytes),
                pos: buf.pos
            }),
            (None, Some(trailer)) => {
                Some(WriteBuf {
                    bytes: Cow::Borrowed(trailer),
                    pos: 0,
                })
            },
            (None, None) => None
        }
    }

    fn trailer(&self) -> Option<&'static [u8]> {
        match self.kind {
            Kind::Chunked(Chunked::Init) => {
                Some(b"0\r\n\r\n")
            }
            _ => None
        }
    }

    pub fn encode<W: AtomicWrite>(&mut self, w: &mut W, msg: &[u8]) -> io::Result<usize> {
        match self.kind {
            Kind::Chunked(ref mut chunked) => {
                chunked.encode(w, &mut self.prefix, msg)
            },
            Kind::Length(ref mut remaining) => {
                let mut n = {
                    let max = cmp::min(*remaining as usize, msg.len());
                    let slice = &msg[..max];

                    let prefix = self.prefix.0.as_ref().map(|buf| &buf.bytes[buf.pos..]).unwrap_or(b"");

                    try!(w.write_atomic(&[prefix, slice]))
                };

                n = self.prefix.update(n);
                if n == 0 {
                    return Err(io::Error::new(io::ErrorKind::WouldBlock, "would block"));
                }

                *remaining -= n as u64;
                Ok(n)
            },
        }
    }
}

#[derive(Debug, PartialEq, Clone)]
enum Chunked {
    Init,
    Size(ChunkSize),
    SizeCr,
    SizeLf,
    Body(usize),
    BodyCr,
    BodyLf,
    End,
}

impl Chunked {
    fn encode<W: AtomicWrite>(&mut self, w: &mut W, prefix: &mut Prefix, msg: &[u8]) -> io::Result<usize> {
        match *self {
            Chunked::Init => {
                let mut size = ChunkSize {
                    bytes: [0; CHUNK_SIZE_MAX_BYTES],
                    pos: 0,
                    len: 0,
                };
                trace!("chunked write, size = {:?}", msg.len());
                write!(&mut size, "{:X}", msg.len())
                    .expect("CHUNK_SIZE_MAX_BYTES should fit any usize");
                *self = Chunked::Size(size);
            }
            Chunked::End => return Ok(0),
            _ => {}
        }
        let mut n = {
            let pieces = match *self {
                Chunked::Init => unreachable!("Chunked::Init should have become Chunked::Size"),
                Chunked::Size(ref size) => [
                    prefix.0.as_ref().map(|buf| &buf.bytes[buf.pos..]).unwrap_or(b""),
                    &size.bytes[size.pos.into() .. size.len.into()],
                    &b"\r\n"[..],
                    msg,
                    &b"\r\n"[..],
                ],
                Chunked::SizeCr => [
                    &b""[..],
                    &b""[..],
                    &b"\r\n"[..],
                    msg,
                    &b"\r\n"[..],
                ],
                Chunked::SizeLf => [
                    &b""[..],
                    &b""[..],
                    &b"\n"[..],
                    msg,
                    &b"\r\n"[..],
                ],
                Chunked::Body(pos) => [
                    &b""[..],
                    &b""[..],
                    &b""[..],
                    &msg[pos..],
                    &b"\r\n"[..],
                ],
                Chunked::BodyCr => [
                    &b""[..],
                    &b""[..],
                    &b""[..],
                    &b""[..],
                    &b"\r\n"[..],
                ],
                Chunked::BodyLf => [
                    &b""[..],
                    &b""[..],
                    &b""[..],
                    &b""[..],
                    &b"\n"[..],
                ],
                Chunked::End => unreachable!("Chunked::End shouldn't write more")
            };
            try!(w.write_atomic(&pieces))
        };

        if n > 0 {
            n = prefix.update(n);
        }
        while n > 0 {
            match *self {
                Chunked::Init => unreachable!("Chunked::Init should have become Chunked::Size"),
                Chunked::Size(mut size) => {
                    n = size.update(n);
                    if size.len == 0 {
                        *self = Chunked::SizeCr;
                    } else {
                        *self = Chunked::Size(size);
                    }
                },
                Chunked::SizeCr => {
                    *self = Chunked::SizeLf;
                    n -= 1;
                }
                Chunked::SizeLf => {
                    *self = Chunked::Body(0);
                    n -= 1;
                }
                Chunked::Body(pos) => {
                    let left = msg.len() - pos;
                    if n >= left {
                        *self = Chunked::BodyCr;
                        n -= left;
                    } else {
                        *self = Chunked::Body(pos + n);
                        n = 0;
                    }
                }
                Chunked::BodyCr => {
                    *self = Chunked::BodyLf;
                    n -= 1;
                }
                Chunked::BodyLf => {
                    assert!(n == 1);
                    *self = if msg.len() == 0 {
                        Chunked::End
                    } else {
                        Chunked::Init
                    };
                    n = 0;
                },
                Chunked::End => unreachable!("Chunked::End shouldn't have any to write")
            }
        }

        match *self {
            Chunked::Init |
            Chunked::End => Ok(msg.len()),
            _ => Err(io::Error::new(io::ErrorKind::WouldBlock, "chunked incomplete"))
        }
    }
}

#[cfg(target_pointer_width = "32")]
const USIZE_BYTES: usize = 4;

#[cfg(target_pointer_width = "64")]
const USIZE_BYTES: usize = 8;

// each byte will become 2 hex
const CHUNK_SIZE_MAX_BYTES: usize = USIZE_BYTES * 2;

#[derive(Clone, Copy)]
struct ChunkSize {
    bytes: [u8; CHUNK_SIZE_MAX_BYTES],
    pos: u8,
    len: u8,
}

impl ChunkSize {
    fn update(&mut self, n: usize) -> usize {
        let diff = (self.len - self.pos).into();
        if n >= diff {
            self.pos = 0;
            self.len = 0;
            n - diff
        } else {
            self.pos += n as u8; // just verified it was a small usize
            0
        }
    }
}

impl ::std::fmt::Debug for ChunkSize {
    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
        f.debug_struct("ChunkSize")
            .field("bytes", &&self.bytes[..self.len.into()])
            .field("pos", &self.pos)
            .finish()
    }
}

impl ::std::cmp::PartialEq for ChunkSize {
    fn eq(&self, other: &ChunkSize) -> bool {
        self.len == other.len &&
            self.pos == other.pos &&
            (&self.bytes[..]) == (&other.bytes[..])
    }
}

impl io::Write for ChunkSize {
    fn write(&mut self, msg: &[u8]) -> io::Result<usize> {
        let n = (&mut self.bytes[self.len.into() ..]).write(msg)
            .expect("&mut [u8].write() cannot error");
        self.len += n as u8; // safe because bytes is never bigger than 256
        Ok(n)
    }

    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}

#[derive(Debug, Clone)]
struct Prefix(Option<WriteBuf<Vec<u8>>>);

impl Prefix {
    fn update(&mut self, n: usize) -> usize {
        if let Some(mut buf) = self.0.take() {
            if buf.bytes.len() - buf.pos > n {
                buf.pos += n;
                self.0 = Some(buf);
                0
            } else {
                let nbuf = buf.bytes.len() - buf.pos;
                n - nbuf
            }
        } else {
            n
        }
    }
}

#[cfg(test)]
mod tests {
    use super::Encoder;
    use mock::{Async, Buf};

    #[test]
    fn test_write_chunked_sync() {
        let mut dst = Buf::new();
        let mut encoder = Encoder::chunked();

        encoder.encode(&mut dst, b"foo bar").unwrap();
        encoder.encode(&mut dst, b"baz quux herp").unwrap();
        encoder.encode(&mut dst, b"").unwrap();
        assert_eq!(&dst[..], &b"7\r\nfoo bar\r\nD\r\nbaz quux herp\r\n0\r\n\r\n"[..]);
    }

    #[test]
    fn test_write_chunked_async() {
        let mut dst = Async::new(Buf::new(), 7);
        let mut encoder = Encoder::chunked();

        assert!(encoder.encode(&mut dst, b"foo bar").is_err());
        dst.block_in(6);
        assert_eq!(7, encoder.encode(&mut dst, b"foo bar").unwrap());
        dst.block_in(30);
        assert_eq!(13, encoder.encode(&mut dst, b"baz quux herp").unwrap());
        encoder.encode(&mut dst, b"").unwrap();
        assert_eq!(&dst[..], &b"7\r\nfoo bar\r\nD\r\nbaz quux herp\r\n0\r\n\r\n"[..]);
    }

    #[test]
    fn test_write_sized() {
        let mut dst = Buf::new();
        let mut encoder = Encoder::length(8);
        encoder.encode(&mut dst, b"foo bar").unwrap();
        assert_eq!(encoder.encode(&mut dst, b"baz").unwrap(), 1);

        assert_eq!(dst, b"foo barb");
    }
}
src/http/h1/mod.rs (new file, 136 lines)
@@ -0,0 +1,136 @@
/*
use std::fmt;
use std::io::{self, Write};
use std::marker::PhantomData;
use std::sync::mpsc;

use url::Url;
use tick;
use time::now_utc;

use header::{self, Headers};
use http::{self, conn};
use method::Method;
use net::{Fresh, Streaming};
use status::StatusCode;
use version::HttpVersion;
*/

pub use self::decode::Decoder;
pub use self::encode::Encoder;

pub use self::parse::parse;

mod decode;
mod encode;
mod parse;

/*
fn should_have_response_body(method: &Method, status: u16) -> bool {
    trace!("should_have_response_body({:?}, {})", method, status);
    match (method, status) {
        (&Method::Head, _) |
        (_, 100...199) |
        (_, 204) |
        (_, 304) |
        (&Method::Connect, 200...299) => false,
        _ => true
    }
}
*/
/*
const MAX_INVALID_RESPONSE_BYTES: usize = 1024 * 128;
impl HttpMessage for Http11Message {

    fn get_incoming(&mut self) -> ::Result<ResponseHead> {
        unimplemented!();
        /*
        try!(self.flush_outgoing());
        let stream = match self.stream.take() {
            Some(stream) => stream,
            None => {
                // The message was already in the reading state...
                // TODO Decide what happens in case we try to get a new incoming at that point
                return Err(From::from(
                    io::Error::new(io::ErrorKind::Other,
                                   "Read already in progress")));
            }
        };

        let expected_no_content = stream.previous_response_expected_no_content();
        trace!("previous_response_expected_no_content = {}", expected_no_content);

        let mut stream = BufReader::new(stream);

        let mut invalid_bytes_read = 0;
        let head;
        loop {
            head = match parse_response(&mut stream) {
                Ok(head) => head,
                Err(::Error::Version)
                if expected_no_content && invalid_bytes_read < MAX_INVALID_RESPONSE_BYTES => {
                    trace!("expected_no_content, found content");
                    invalid_bytes_read += 1;
                    stream.consume(1);
                    continue;
                }
                Err(e) => {
                    self.stream = Some(stream.into_inner());
                    return Err(e);
                }
            };
            break;
        }

        let raw_status = head.subject;
        let headers = head.headers;

        let method = self.method.take().unwrap_or(Method::Get);

        let is_empty = !should_have_response_body(&method, raw_status.0);
        stream.get_mut().set_previous_response_expected_no_content(is_empty);
        // According to https://tools.ietf.org/html/rfc7230#section-3.3.3
        // 1. HEAD responses, and Status 1xx, 204, and 304 cannot have a body.
        // 2. Status 2xx to a CONNECT cannot have a body.
        // 3. Transfer-Encoding: chunked has a chunked body.
        // 4. If multiple differing Content-Length headers or invalid, close connection.
        // 5. Content-Length header has a sized body.
        // 6. Not Client.
        // 7. Read till EOF.
        self.reader = Some(if is_empty {
            SizedReader(stream, 0)
        } else {
            if let Some(&TransferEncoding(ref codings)) = headers.get() {
                if codings.last() == Some(&Chunked) {
                    ChunkedReader(stream, None)
                } else {
                    trace!("not chunked, read till eof");
                    EofReader(stream)
                }
            } else if let Some(&ContentLength(len)) = headers.get() {
                SizedReader(stream, len)
            } else if headers.has::<ContentLength>() {
                trace!("illegal Content-Length: {:?}", headers.get_raw("Content-Length"));
                return Err(Error::Header);
            } else {
                trace!("neither Transfer-Encoding nor Content-Length");
                EofReader(stream)
            }
        });

        trace!("Http11Message.reader = {:?}", self.reader);


        Ok(ResponseHead {
            headers: headers,
            raw_status: raw_status,
            version: head.version,
        })
        */
    }
}


*/
246
src/http/h1/parse.rs
Normal file
@@ -0,0 +1,246 @@
use std::borrow::Cow;
use std::io::Write;

use httparse;

use header::{self, Headers, ContentLength, TransferEncoding};
use http::{MessageHead, RawStatus, Http1Message, ParseResult, Next, ServerMessage, ClientMessage, Next_, RequestLine};
use http::h1::{Encoder, Decoder};
use method::Method;
use status::StatusCode;
use version::HttpVersion::{Http10, Http11};

const MAX_HEADERS: usize = 100;
const AVERAGE_HEADER_SIZE: usize = 30; // totally scientific

pub fn parse<T: Http1Message<Incoming=I>, I>(buf: &[u8]) -> ParseResult<I> {
    if buf.len() == 0 {
        return Ok(None);
    }
    trace!("parse({:?})", buf);
    <T as Http1Message>::parse(buf)
}



impl Http1Message for ServerMessage {
    type Incoming = RequestLine;
    type Outgoing = StatusCode;

    fn initial_interest() -> Next {
        Next::new(Next_::Read)
    }

    fn parse(buf: &[u8]) -> ParseResult<RequestLine> {
        let mut headers = [httparse::EMPTY_HEADER; MAX_HEADERS];
        trace!("Request.parse([Header; {}], [u8; {}])", headers.len(), buf.len());
        let mut req = httparse::Request::new(&mut headers);
        Ok(match try!(req.parse(buf)) {
            httparse::Status::Complete(len) => {
                trace!("Request.parse Complete({})", len);
                Some((MessageHead {
                    version: if req.version.unwrap() == 1 { Http11 } else { Http10 },
                    subject: RequestLine(
                        try!(req.method.unwrap().parse()),
                        try!(req.path.unwrap().parse())
                    ),
                    headers: try!(Headers::from_raw(req.headers))
                }, len))
            },
            httparse::Status::Partial => None
        })
    }

    fn decoder(head: &MessageHead<Self::Incoming>) -> ::Result<Decoder> {
        use ::header;
        if let Some(&header::ContentLength(len)) = head.headers.get() {
            Ok(Decoder::length(len))
        } else if head.headers.has::<header::TransferEncoding>() {
            //TODO: check for Transfer-Encoding: chunked
            Ok(Decoder::chunked())
        } else {
            Ok(Decoder::length(0))
        }
    }


    fn encode(mut head: MessageHead<Self::Outgoing>, dst: &mut Vec<u8>) -> Encoder {
        use ::header;
        trace!("writing head: {:?}", head);

        if !head.headers.has::<header::Date>() {
            head.headers.set(header::Date(header::HttpDate(::time::now_utc())));
        }

        let mut is_chunked = true;
        let mut body = Encoder::chunked();
        if let Some(cl) = head.headers.get::<header::ContentLength>() {
            body = Encoder::length(**cl);
            is_chunked = false
        }

        if is_chunked {
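            // `encodings` below ends up `true` only when no Transfer-Encoding
            // header was present at all; in that case one is inserted further down.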
            let encodings = match head.headers.get_mut::<header::TransferEncoding>() {
                Some(&mut header::TransferEncoding(ref mut encodings)) => {
                    if encodings.last() != Some(&header::Encoding::Chunked) {
                        encodings.push(header::Encoding::Chunked);
                    }
                    false
                },
                None => true
            };

            if encodings {
                head.headers.set(header::TransferEncoding(vec![header::Encoding::Chunked]));
            }
        }


        let init_cap = 30 + head.headers.len() * AVERAGE_HEADER_SIZE;
        dst.reserve(init_cap);
        debug!("writing {:#?}", head.headers);
        let _ = write!(dst, "{} {}\r\n{}\r\n", head.version, head.subject, head.headers);

        body
    }
}

impl Http1Message for ClientMessage {
    type Incoming = RawStatus;
    type Outgoing = RequestLine;


    fn initial_interest() -> Next {
        Next::new(Next_::Write)
    }

    fn parse(buf: &[u8]) -> ParseResult<RawStatus> {
        let mut headers = [httparse::EMPTY_HEADER; MAX_HEADERS];
        trace!("Response.parse([Header; {}], [u8; {}])", headers.len(), buf.len());
        let mut res = httparse::Response::new(&mut headers);
        Ok(match try!(res.parse(buf)) {
            httparse::Status::Complete(len) => {
                trace!("Response.try_parse Complete({})", len);
                let code = res.code.unwrap();
                let reason = match StatusCode::from_u16(code).canonical_reason() {
                    Some(reason) if reason == res.reason.unwrap() => Cow::Borrowed(reason),
                    _ => Cow::Owned(res.reason.unwrap().to_owned())
                };
                Some((MessageHead {
                    version: if res.version.unwrap() == 1 { Http11 } else { Http10 },
                    subject: RawStatus(code, reason),
                    headers: try!(Headers::from_raw(res.headers))
                }, len))
            },
            httparse::Status::Partial => None
        })
    }

    fn decoder(inc: &MessageHead<Self::Incoming>) -> ::Result<Decoder> {
        use ::header;
        // According to https://tools.ietf.org/html/rfc7230#section-3.3.3
        // 1. HEAD responses, and Status 1xx, 204, and 304 cannot have a body.
        // 2. Status 2xx to a CONNECT cannot have a body.
        //
        // First two steps taken care of before this method.
        //
        // 3. Transfer-Encoding: chunked has a chunked body.
        // 4. If multiple differing Content-Length headers or invalid, close connection.
        // 5. Content-Length header has a sized body.
        // 6. Not Client.
        // 7. Read till EOF.
        if let Some(&header::TransferEncoding(ref codings)) = inc.headers.get() {
            if codings.last() == Some(&header::Encoding::Chunked) {
                Ok(Decoder::chunked())
            } else {
                trace!("not chunked. read till eof");
                Ok(Decoder::eof())
            }
        } else if let Some(&header::ContentLength(len)) = inc.headers.get() {
            Ok(Decoder::length(len))
        } else if inc.headers.has::<header::ContentLength>() {
            trace!("illegal Content-Length: {:?}", inc.headers.get_raw("Content-Length"));
            Err(::Error::Header)
        } else {
            trace!("neither Transfer-Encoding nor Content-Length");
            Ok(Decoder::eof())
        }
    }

    fn encode(mut head: MessageHead<Self::Outgoing>, dst: &mut Vec<u8>) -> Encoder {
        trace!("writing head: {:?}", head);


        let mut body = Encoder::length(0);
        let expects_no_body = match head.subject.0 {
            Method::Head | Method::Get | Method::Connect => true,
            _ => false
        };
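        // GET, HEAD, and CONNECT requests are assumed to carry no body; only other
        // methods fall back to chunked encoding below when no Content-Length is set.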
        let mut chunked = false;

        if let Some(con_len) = head.headers.get::<ContentLength>() {
            body = Encoder::length(**con_len);
        } else {
            chunked = !expects_no_body;
        }

        if chunked {
            body = Encoder::chunked();
            let encodings = match head.headers.get_mut::<TransferEncoding>() {
                Some(encodings) => {
                    //TODO: check if Chunked already exists
                    encodings.push(header::Encoding::Chunked);
                    true
                },
                None => false
            };

            if !encodings {
                head.headers.set(TransferEncoding(vec![header::Encoding::Chunked]));
            }
        }

        let init_cap = 30 + head.headers.len() * AVERAGE_HEADER_SIZE;
        dst.reserve(init_cap);
        debug!("writing {:#?}", head.headers);
        let _ = write!(dst, "{} {}\r\n{}\r\n", head.subject, head.version, head.headers);

        body
    }
}

#[cfg(test)]
mod tests {
    use http;
    use super::{parse};

    #[test]
    fn test_parse_request() {
        let raw = b"GET /echo HTTP/1.1\r\nHost: hyper.rs\r\n\r\n";
        parse::<http::ServerMessage, _>(raw).unwrap();
    }

    #[test]
    fn test_parse_raw_status() {
        let raw = b"HTTP/1.1 200 OK\r\n\r\n";
        let (res, _) = parse::<http::ClientMessage, _>(raw).unwrap().unwrap();
        assert_eq!(res.subject.1, "OK");

        let raw = b"HTTP/1.1 200 Howdy\r\n\r\n";
        let (res, _) = parse::<http::ClientMessage, _>(raw).unwrap().unwrap();
        assert_eq!(res.subject.1, "Howdy");
    }

    #[cfg(feature = "nightly")]
    use test::Bencher;

    #[cfg(feature = "nightly")]
    #[bench]
    fn bench_parse_incoming(b: &mut Bencher) {
        let raw = b"GET /echo HTTP/1.1\r\nHost: hyper.rs\r\n\r\n";
        b.iter(|| {
            parse::<http::ServerMessage, _>(raw).unwrap()
        });
    }

}
@@ -1,133 +0,0 @@
//! Defines the `HttpMessage` trait that serves to encapsulate the operations of a single
//! request-response cycle on any HTTP connection.

use std::any::{Any, TypeId};
use std::fmt::Debug;
use std::io::{Read, Write};
use std::mem;

use std::io;
use std::time::Duration;

use typeable::Typeable;

use header::Headers;
use http::RawStatus;
use url::Url;

use method;
use version;
use traitobject;

/// The trait provides an API for creating new `HttpMessage`s depending on the underlying HTTP
/// protocol.
pub trait Protocol {
    /// Creates a fresh `HttpMessage` bound to the given host, based on the given protocol scheme.
    fn new_message(&self, host: &str, port: u16, scheme: &str) -> ::Result<Box<HttpMessage>>;
}

/// Describes a request.
#[derive(Clone, Debug)]
pub struct RequestHead {
    /// The headers of the request
    pub headers: Headers,
    /// The method of the request
    pub method: method::Method,
    /// The URL of the request
    pub url: Url,
}

/// Describes a response.
#[derive(Clone, Debug)]
pub struct ResponseHead {
    /// The headers of the response
    pub headers: Headers,
    /// The raw status line of the response
    pub raw_status: RawStatus,
    /// The HTTP version which generated the response
    pub version: version::HttpVersion,
}

/// The trait provides an API for sending and receiving HTTP messages.
pub trait HttpMessage: Write + Read + Send + Any + Typeable + Debug {
    /// Initiates a new outgoing request.
    ///
    /// Only the request's head is provided (in terms of the `RequestHead` struct).
    ///
    /// After this, the `HttpMessage` instance can be used as an `io::Write` in order to write the
    /// body of the request.
    fn set_outgoing(&mut self, head: RequestHead) -> ::Result<RequestHead>;
    /// Obtains the incoming response and returns its head (i.e. the `ResponseHead` struct)
    ///
    /// After this, the `HttpMessage` instance can be used as an `io::Read` in order to read out
    /// the response body.
    fn get_incoming(&mut self) -> ::Result<ResponseHead>;
    /// Set the read timeout duration for this message.
    fn set_read_timeout(&self, dur: Option<Duration>) -> io::Result<()>;
    /// Set the write timeout duration for this message.
    fn set_write_timeout(&self, dur: Option<Duration>) -> io::Result<()>;
    /// Closes the underlying HTTP connection.
    fn close_connection(&mut self) -> ::Result<()>;
    /// Returns whether the incoming message has a body.
    fn has_body(&self) -> bool;
    /// Called when the Client wishes to use a Proxy.
    fn set_proxied(&mut self, val: bool) {
        // default implementation so as to not be a breaking change.
        warn!("default set_proxied({:?})", val);
    }
}

impl HttpMessage {
    unsafe fn downcast_ref_unchecked<T: 'static>(&self) -> &T {
        mem::transmute(traitobject::data(self))
    }

    unsafe fn downcast_mut_unchecked<T: 'static>(&mut self) -> &mut T {
        mem::transmute(traitobject::data_mut(self))
    }

    unsafe fn downcast_unchecked<T: 'static>(self: Box<HttpMessage>) -> Box<T> {
        let raw: *mut HttpMessage = mem::transmute(self);
        mem::transmute(traitobject::data_mut(raw))
    }
}

impl HttpMessage {
    /// Is the underlying type in this trait object a T?
    #[inline]
    pub fn is<T: Any>(&self) -> bool {
        (*self).get_type() == TypeId::of::<T>()
    }

    /// If the underlying type is T, get a reference to the contained data.
    #[inline]
    pub fn downcast_ref<T: Any>(&self) -> Option<&T> {
        if self.is::<T>() {
            Some(unsafe { self.downcast_ref_unchecked() })
        } else {
            None
        }
    }

    /// If the underlying type is T, get a mutable reference to the contained
    /// data.
    #[inline]
    pub fn downcast_mut<T: Any>(&mut self) -> Option<&mut T> {
        if self.is::<T>() {
            Some(unsafe { self.downcast_mut_unchecked() })
        } else {
            None
        }
    }

    /// If the underlying type is T, extract it.
    #[inline]
    pub fn downcast<T: Any>(self: Box<HttpMessage>)
            -> Result<Box<T>, Box<HttpMessage>> {
        if self.is::<T>() {
            Ok(unsafe { self.downcast_unchecked() })
        } else {
            Err(self)
        }
    }
}
331
src/http/mod.rs
@@ -1,25 +1,196 @@
//! Pieces pertaining to the HTTP message protocol.
use std::borrow::Cow;
use std::fmt;
use std::io::{self, Read, Write};
use std::time::Duration;

use header::Connection;
use header::ConnectionOption::{KeepAlive, Close};
use header::Headers;
use method::Method;
use net::Transport;
use status::StatusCode;
use uri::RequestUri;
use version::HttpVersion;
use version::HttpVersion::{Http10, Http11};

#[cfg(feature = "serde-serialization")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};

pub use self::message::{HttpMessage, RequestHead, ResponseHead, Protocol};
pub use self::conn::{Conn, MessageHandler, MessageHandlerFactory, Seed, Key};

pub mod h1;
pub mod h2;
pub mod message;
mod buffer;
pub mod channel;
mod conn;
mod h1;
//mod h2;

/// Wraps a `Transport` to provide HTTP decoding when reading.
#[derive(Debug)]
pub struct Decoder<'a, T: Read + 'a>(DecoderImpl<'a, T>);

/// Wraps a `Transport` to provide HTTP encoding when writing.
#[derive(Debug)]
pub struct Encoder<'a, T: Transport + 'a>(EncoderImpl<'a, T>);

#[derive(Debug)]
enum DecoderImpl<'a, T: Read + 'a> {
    H1(&'a mut h1::Decoder, Trans<'a, T>),
}

#[derive(Debug)]
enum Trans<'a, T: Read + 'a> {
    Port(&'a mut T),
    Buf(self::buffer::BufReader<'a, T>)
}

impl<'a, T: Read + 'a> Read for Trans<'a, T> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        match *self {
            Trans::Port(ref mut t) => t.read(buf),
            Trans::Buf(ref mut b) => b.read(buf)
        }
    }
}

#[derive(Debug)]
enum EncoderImpl<'a, T: Transport + 'a> {
    H1(&'a mut h1::Encoder, &'a mut T),
}

impl<'a, T: Read> Decoder<'a, T> {
    fn h1(decoder: &'a mut h1::Decoder, transport: Trans<'a, T>) -> Decoder<'a, T> {
        Decoder(DecoderImpl::H1(decoder, transport))
    }
}

impl<'a, T: Transport> Encoder<'a, T> {
    fn h1(encoder: &'a mut h1::Encoder, transport: &'a mut T) -> Encoder<'a, T> {
        Encoder(EncoderImpl::H1(encoder, transport))
    }
}

impl<'a, T: Read> Read for Decoder<'a, T> {
    #[inline]
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        match self.0 {
            DecoderImpl::H1(ref mut decoder, ref mut transport) => {
                decoder.decode(transport, buf)
            }
        }
    }
}

impl<'a, T: Transport> Write for Encoder<'a, T> {
    #[inline]
    fn write(&mut self, data: &[u8]) -> io::Result<usize> {
        if data.is_empty() {
            return Ok(0);
        }
        match self.0 {
            EncoderImpl::H1(ref mut encoder, ref mut transport) => {
                encoder.encode(*transport, data)
            }
        }
    }

    #[inline]
    fn flush(&mut self) -> io::Result<()> {
        match self.0 {
            EncoderImpl::H1(_, ref mut transport) => {
                transport.flush()
            }
        }
    }
}

/// Because privacy rules. Reasons.
/// https://github.com/rust-lang/rust/issues/30905
mod internal {
    use std::io::{self, Write};

    #[derive(Debug, Clone)]
    pub struct WriteBuf<T: AsRef<[u8]>> {
        pub bytes: T,
        pub pos: usize,
    }

    pub trait AtomicWrite {
        fn write_atomic(&mut self, data: &[&[u8]]) -> io::Result<usize>;
    }

    #[cfg(not(windows))]
    impl<T: Write + ::vecio::Writev> AtomicWrite for T {

        fn write_atomic(&mut self, bufs: &[&[u8]]) -> io::Result<usize> {
            self.writev(bufs)
        }

    }

    #[cfg(windows)]
    impl<T: Write> AtomicWrite for T {
        fn write_atomic(&mut self, bufs: &[&[u8]]) -> io::Result<usize> {
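            // no vectored write support here, so coalesce the buffers and issue
            // a single contiguous write instead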
            let vec = bufs.concat();
            self.write(&vec)
        }
    }
}

/// An Incoming Message head. Includes request/status line, and headers.
#[derive(Debug, Default)]
pub struct MessageHead<S> {
    /// HTTP version of the message.
    pub version: HttpVersion,
    /// Subject (request line or status line) of Incoming message.
    pub subject: S,
    /// Headers of the Incoming message.
    pub headers: Headers
}

/// An incoming request message.
pub type RequestHead = MessageHead<RequestLine>;

#[derive(Debug, Default)]
pub struct RequestLine(pub Method, pub RequestUri);

impl fmt::Display for RequestLine {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{} {}", self.0, self.1)
    }
}

/// An incoming response message.
pub type ResponseHead = MessageHead<RawStatus>;

impl<S> MessageHead<S> {
    pub fn should_keep_alive(&self) -> bool {
        should_keep_alive(self.version, &self.headers)
    }
}

/// The raw status code and reason-phrase.
#[derive(Clone, PartialEq, Debug)]
pub struct RawStatus(pub u16, pub Cow<'static, str>);

impl fmt::Display for RawStatus {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{} {}", self.0, self.1)
    }
}

impl From<StatusCode> for RawStatus {
    fn from(status: StatusCode) -> RawStatus {
        RawStatus(status.to_u16(), Cow::Borrowed(status.canonical_reason().unwrap_or("")))
    }
}

impl Default for RawStatus {
    fn default() -> RawStatus {
        RawStatus(200, Cow::Borrowed("OK"))
    }
}

#[cfg(feature = "serde-serialization")]
impl Serialize for RawStatus {
    fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error> where S: Serializer {
@@ -46,6 +217,158 @@ pub fn should_keep_alive(version: HttpVersion, headers: &Headers) -> bool {
        _ => true
    }
}
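/// A successful parse yields the message head plus the number of bytes of the
/// buffer that the head consumed; `None` means more input is needed.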
pub type ParseResult<T> = ::Result<Option<(MessageHead<T>, usize)>>;

pub fn parse<T: Http1Message<Incoming=I>, I>(rdr: &[u8]) -> ParseResult<I> {
    h1::parse::<T, I>(rdr)
}

// These 2 enums are not actually dead_code. They are used in the server
// and client modules, respectively. However, their being used as associated
// types doesn't mark them as used, so the dead_code linter complains.

#[allow(dead_code)]
#[derive(Debug)]
pub enum ServerMessage {}

#[allow(dead_code)]
#[derive(Debug)]
pub enum ClientMessage {}

pub trait Http1Message {
    type Incoming;
    type Outgoing: Default;
    //TODO: replace with associated const when stable
    fn initial_interest() -> Next;
    fn parse(bytes: &[u8]) -> ParseResult<Self::Incoming>;
    fn decoder(head: &MessageHead<Self::Incoming>) -> ::Result<h1::Decoder>;
    fn encode(head: MessageHead<Self::Outgoing>, dst: &mut Vec<u8>) -> h1::Encoder;

}

/// Used to signal desired events when working with asynchronous IO.
#[must_use]
#[derive(Clone)]
pub struct Next {
    interest: Next_,
    timeout: Option<Duration>,
}

impl fmt::Debug for Next {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        try!(write!(f, "Next::{:?}", &self.interest));
        match self.timeout {
            Some(ref d) => write!(f, "({:?})", d),
            None => Ok(())
        }
    }
}

#[derive(Debug, Clone, Copy)]
enum Next_ {
    Read,
    Write,
    ReadWrite,
    Wait,
    End,
    Remove,
}

#[derive(Debug, Clone, Copy)]
enum Reg {
    Read,
    Write,
    ReadWrite,
    Wait,
    Remove
}

/// A notifier to wake up a socket after having used `Next::wait()`
#[derive(Debug, Clone)]
pub struct Control {
    tx: self::channel::Sender<Next>,
}

impl Control {
    /// Wake up a waiting socket to listen for a certain event.
    pub fn ready(&self, next: Next) -> Result<(), ControlError> {
        //TODO: assert!( next.interest != Next_::Wait ) ?
        self.tx.send(next).map_err(|_| ControlError(()))
    }
}

/// An error occurred trying to tell a Control it is ready.
#[derive(Debug)]
pub struct ControlError(());

impl ::std::error::Error for ControlError {
    fn description(&self) -> &str {
        "Cannot wakeup event loop: loop is closed"
    }
}

impl fmt::Display for ControlError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(::std::error::Error::description(self))
    }
}

impl Next {
    fn new(interest: Next_) -> Next {
        Next {
            interest: interest,
            timeout: None,
        }
    }

    fn interest(&self) -> Reg {
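        // map the requested interest onto an event-loop registration; both End
        // and Remove translate to deregistering the transport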
        match self.interest {
            Next_::Read => Reg::Read,
            Next_::Write => Reg::Write,
            Next_::ReadWrite => Reg::ReadWrite,
            Next_::Wait => Reg::Wait,
            Next_::End => Reg::Remove,
            Next_::Remove => Reg::Remove,
        }
    }

    /// Signals the desire to read from the transport.
    pub fn read() -> Next {
        Next::new(Next_::Read)
    }

    /// Signals the desire to write to the transport.
    pub fn write() -> Next {
        Next::new(Next_::Write)
    }

    /// Signals the desire to read and write to the transport.
    pub fn read_and_write() -> Next {
        Next::new(Next_::ReadWrite)
    }

    /// Signals the desire to end the current HTTP message.
    pub fn end() -> Next {
        Next::new(Next_::End)
    }

    /// Signals the desire to abruptly remove the current transport from the
    /// event loop.
    pub fn remove() -> Next {
        Next::new(Next_::Remove)
    }

    /// Signals the desire to wait until some future time before acting again.
    pub fn wait() -> Next {
        Next::new(Next_::Wait)
    }

    /// Signals a maximum duration to be waited for the desired event.
    pub fn timeout(mut self, dur: Duration) -> Next {
        self.timeout = Some(dur);
        self
    }
}

#[test]
fn test_should_keep_alive() {