Progress towards allowing large writes

@@ -78,6 +78,9 @@ pub enum User {
    /// transmit a Data frame to the remote.
    FlowControlViolation,

    /// The payload size is too big
    PayloadTooBig,

    /// The connection state is corrupt and the connection should be dropped.
    Corrupt,

@@ -128,6 +131,7 @@ macro_rules! user_desc {
            StreamReset(_) => concat!($prefix, "frame sent on reset stream"),
            Corrupt => concat!($prefix, "connection state corrupt"),
            Rejected => concat!($prefix, "stream would exceed remote max concurrency"),
            PayloadTooBig => concat!($prefix, "payload too big"),
        }
    });
}

@@ -52,9 +52,24 @@ impl<T> Data<T> {
        &self.data
    }

    pub fn payload_mut(&mut self) -> &mut T {
        &mut self.data
    }

    pub fn into_payload(self) -> T {
        self.data
    }

    pub fn map<F, U>(self, f: F) -> Data<U>
        where F: FnOnce(T) -> U,
    {
        Data {
            stream_id: self.stream_id,
            data: f(self.data),
            flags: self.flags,
            pad_len: self.pad_len,
        }
    }
}

impl<T: Buf> Data<T> {

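The `map` added to `frame::Data` above is the hook that lets a frame's payload be swapped for another type while the frame metadata rides along unchanged. A minimal sketch of that pattern, using a hypothetical `Envelope` type rather than the real `frame::Data`:

```rust
// Sketch of the payload-mapping pattern: metadata stays, payload type changes.
// `Envelope` is a stand-in for a frame, not the actual h2 type.
struct Envelope<T> {
    stream_id: u32,
    payload: T,
}

impl<T> Envelope<T> {
    /// Transform the payload type, keeping the metadata untouched.
    fn map<F, U>(self, f: F) -> Envelope<U>
    where
        F: FnOnce(T) -> U,
    {
        Envelope {
            stream_id: self.stream_id,
            payload: f(self.payload),
        }
    }
}

fn main() {
    let frame = Envelope { stream_id: 1, payload: vec![0u8; 4] };
    // Wrap the payload (e.g. to tag it with a stream key), then unwrap it again.
    let wrapped = frame.map(|buf| (buf, 42usize));
    let unwrapped = wrapped.map(|(buf, _key)| buf);
    assert_eq!(unwrapped.stream_id, 1);
    assert_eq!(unwrapped.payload.len(), 4);
}
```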
@@ -80,6 +80,24 @@ impl<T> Frame<T> {
            _ => false,
        }
    }

    pub fn map<F, U>(self, f: F) -> Frame<U>
        where F: FnOnce(T) -> U
    {
        use self::Frame::*;

        match self {
            Data(frame) => frame.map(f).into(),
            Headers(frame) => frame.into(),
            Priority(frame) => frame.into(),
            PushPromise(frame) => frame.into(),
            Settings(frame) => frame.into(),
            Ping(frame) => frame.into(),
            GoAway(frame) => frame.into(),
            WindowUpdate(frame) => frame.into(),
            Reset(frame) => frame.into(),
        }
    }
}

impl<T: Buf> Frame<T> {

@@ -12,6 +12,15 @@ impl<T, B> Codec<T, B> {
        self.framed_write().apply_remote_settings(frame);
    }

    /// Takes the data payload value that was fully written to the socket
    pub(crate) fn take_last_data_frame(&mut self) -> Option<frame::Data<B>> {
        self.framed_write().take_last_data_frame()
    }

    pub fn max_send_frame_size(&self) -> usize {
        self.inner.get_ref().max_frame_size()
    }

    fn framed_read(&mut self) -> &mut FramedRead<FramedWrite<T, B>> {
        &mut self.inner
    }

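`take_last_data_frame` is the hand-back point the prioritizer will use to reclaim a payload once it has been flushed: the codec yields the frame exactly once. A minimal sketch of that take-once pattern with `Option::take` (names are illustrative, not the real codec API):

```rust
// Sketch of the "hand the written frame back exactly once" pattern.
struct Writer {
    // Set when a data frame has been fully written to the socket.
    last_data_frame: Option<Vec<u8>>,
}

impl Writer {
    /// Takes the last written payload, leaving `None` behind so the
    /// same buffer cannot be reclaimed twice.
    fn take_last_data_frame(&mut self) -> Option<Vec<u8>> {
        self.last_data_frame.take()
    }
}

fn main() {
    let mut w = Writer { last_data_frame: Some(b"hello".to_vec()) };
    assert_eq!(w.take_last_data_frame().as_deref(), Some(&b"hello"[..]));
    // A second call returns nothing: the frame was already reclaimed.
    assert!(w.take_last_data_frame().is_none());
}
```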
@@ -12,15 +12,12 @@ use std::marker::PhantomData;

/// An H2 connection
#[derive(Debug)]
pub struct Connection<T, P, B: IntoBuf = Bytes> {
pub(crate) struct Connection<T, P, B: IntoBuf = Bytes> {
    // Codec
    codec: Codec<T, B::Buf>,

    // TODO: Remove <B>
    ping_pong: PingPong<B::Buf>,
    codec: Codec<T, Prioritized<B::Buf>>,
    ping_pong: PingPong<Prioritized<B::Buf>>,
    settings: Settings,
    streams: Streams<B::Buf>,

    _phantom: PhantomData<P>,
}

@@ -29,7 +26,7 @@ impl<T, P, B> Connection<T, P, B>
    P: Peer,
    B: IntoBuf,
{
    pub fn new(codec: Codec<T, B::Buf>) -> Connection<T, P, B> {
    pub fn new(codec: Codec<T, Prioritized<B::Buf>>) -> Connection<T, P, B> {
        // TODO: Actually configure
        let streams = Streams::new::<P>(streams::Config {
            max_remote_initiated: None,
@@ -49,10 +46,11 @@ impl<T, P, B> Connection<T, P, B>

    /// Returns `Ready` when the connection is ready to receive a frame.
    pub fn poll_ready(&mut self) -> Poll<(), ConnectionError> {
        try_ready!(self.poll_send_ready());

        // TODO: Once there is write buffering, this shouldn't be needed
        try_ready!(self.codec.poll_ready());
        // The order of these calls doesn't really matter too much, as only one
        // should have pending work.
        try_ready!(self.ping_pong.send_pending_pong(&mut self.codec));
        try_ready!(self.settings.send_pending_ack(&mut self.codec, &mut self.streams));
        try_ready!(self.streams.send_pending_refusal(&mut self.codec));

        Ok(().into())
    }
@@ -74,7 +72,7 @@ impl<T, P, B> Connection<T, P, B>

        loop {
            // First, ensure that the `Connection` is able to receive a frame
            try_ready!(self.poll_recv_ready());
            try_ready!(self.poll_ready());

            trace!("polling codec");

@@ -137,38 +135,11 @@ impl<T, P, B> Connection<T, P, B>
        }
    }

    // ===== Private =====

    /// Returns `Ready` when the `Connection` is ready to receive a frame from
    /// the socket.
    fn poll_recv_ready(&mut self) -> Poll<(), ConnectionError> {
        // The order of these calls doesn't really matter too much, as only one
        // should have pending work.
        try_ready!(self.ping_pong.send_pending_pong(&mut self.codec));
        try_ready!(self.settings.send_pending_ack(&mut self.codec, &mut self.streams));
        try_ready!(self.streams.send_pending_refusal(&mut self.codec));

        Ok(().into())
    }

    /// Returns `Ready` when the `Connection` is ready to accept a frame from
    /// the user.
    ///
    /// This function is currently used by poll_complete, but at some point it
    /// will probably not be required.
    fn poll_send_ready(&mut self) -> Poll<(), ConnectionError> {
        // TODO: Is this function needed?
        try_ready!(self.poll_recv_ready());

        Ok(().into())
    }

    fn poll_complete(&mut self) -> Poll<(), ConnectionError> {
        try_ready!(self.poll_send_ready());
        try_ready!(self.poll_ready());

        // Ensure all window updates have been sent.
        try_ready!(self.streams.poll_complete(&mut self.codec));
        try_ready!(self.codec.poll_complete());

        Ok(().into())
    }

@@ -147,6 +147,10 @@ impl<T> FramedRead<T> {
        Ok(Some(frame))
    }

    pub fn get_ref(&self) -> &T {
        self.inner.get_ref()
    }

    pub fn get_mut(&mut self) -> &mut T {
        self.inner.get_mut()
    }

@@ -1,4 +1,5 @@
use {hpack, ConnectionError, FrameSize};
use error::User::*;
use frame::{self, Frame};

use futures::*;
@@ -17,24 +18,23 @@ pub struct FramedWrite<T, B> {
    hpack: hpack::Encoder,

    /// Write buffer
    ///
    /// TODO: Should this be a ring buffer?
    buf: Cursor<BytesMut>,

    /// Next frame to encode
    next: Option<Next<B>>,

    /// Last data frame
    last_data_frame: Option<frame::Data<B>>,

    /// Max frame size, this is specified by the peer
    max_frame_size: FrameSize,
}

#[derive(Debug)]
enum Next<B> {
    Data {
        /// Length of the current frame being written
        frame_len: usize,

        /// Data frame to encode
        data: frame::Data<B>,
    },
    Data(frame::Data<B>),
    Continuation(frame::Continuation),
}

@@ -60,6 +60,7 @@ impl<T, B> FramedWrite<T, B>
            hpack: hpack::Encoder::default(),
            buf: Cursor::new(BytesMut::with_capacity(DEFAULT_BUFFER_CAPACITY)),
            next: None,
            last_data_frame: None,
            max_frame_size: frame::DEFAULT_MAX_FRAME_SIZE,
        }
    }
@@ -82,20 +83,27 @@ impl<T, B> FramedWrite<T, B>
    }

    fn is_empty(&self) -> bool {
        self.next.is_none() && !self.buf.has_remaining()
    }

    fn frame_len(&self, data: &frame::Data<B>) -> usize {
        cmp::min(self.max_frame_size as usize, data.payload().remaining())
        match self.next {
            Some(Next::Data(ref frame)) => !frame.payload().has_remaining(),
            _ => !self.buf.has_remaining(),
        }
    }
}

impl<T, B> FramedWrite<T, B> {
    pub fn max_frame_size(&self) -> usize {
        self.max_frame_size as usize
    }

    pub fn apply_remote_settings(&mut self, settings: &frame::Settings) {
        if let Some(val) = settings.max_frame_size() {
            self.max_frame_size = val;
        }
    }

    pub fn take_last_data_frame(&mut self) -> Option<frame::Data<B>> {
        self.last_data_frame.take()
    }
}

impl<T, B> Sink for FramedWrite<T, B>
@@ -116,24 +124,30 @@ impl<T, B> Sink for FramedWrite<T, B>

        match item {
            Frame::Data(mut v) => {
                if v.payload().remaining() >= CHAIN_THRESHOLD {
                // Ensure that the payload is not greater than the max frame.
                let len = v.payload().remaining();

                if len > self.max_frame_size() {
                    return Err(PayloadTooBig.into());
                }

                if len >= CHAIN_THRESHOLD {
                    let head = v.head();
                    let len = self.frame_len(&v);

                    // Encode the frame head to the buffer
                    head.encode(len, self.buf.get_mut());

                    // Save the data frame
                    self.next = Some(Next::Data {
                        frame_len: len,
                        data: v,
                    });
                    self.next = Some(Next::Data(v));
                } else {
                    v.encode_chunk(self.buf.get_mut());

                    // The chunk has been fully encoded, so there is no need to
                    // keep it around
                    assert_eq!(v.payload().remaining(), 0, "chunk not fully encoded");

                    // Save off the last frame...
                    self.last_data_frame = Some(v);
                }
            }
            Frame::Headers(v) => {

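The `start_send` change above splits data frames by size: after the max-frame-size check, a payload under `CHAIN_THRESHOLD` is encoded straight into the shared write buffer, while a larger payload only has its frame head buffered and is held in `Next::Data` so it can be chained in at write time. A rough sketch of that decision, assuming the `bytes` crate and illustrative constants rather than the codec's real types:

```rust
// Sketch of the copy-vs-chain decision in start_send above. The threshold,
// the fake frame head, and `Pending` are illustrative stand-ins.
use bytes::{Buf, BufMut, Bytes, BytesMut};

const CHAIN_THRESHOLD: usize = 256;

enum Pending {
    /// Large payload: only the frame head was buffered; the payload is
    /// written later by chaining it behind the buffer.
    Chained(Bytes),
    /// Small payload: everything was copied into the write buffer.
    None,
}

fn buffer_data(buf: &mut BytesMut, payload: Bytes) -> Pending {
    // Stand-in for encoding the 9-byte HTTP/2 frame head into the buffer.
    buf.put_u8(0); // type
    buf.put_u8(0); // flags
    // (length and stream id omitted in this sketch)

    if payload.remaining() >= CHAIN_THRESHOLD {
        // Too big to copy: keep the payload and chain it at write time.
        Pending::Chained(payload)
    } else {
        // Small enough: copy it into the shared buffer right away.
        buf.put_slice(&payload);
        Pending::None
    }
}

fn main() {
    let mut buf = BytesMut::with_capacity(1024);
    let small = Bytes::from_static(b"hello");
    assert!(matches!(buffer_data(&mut buf, small), Pending::None));

    let large = Bytes::from(vec![0u8; 4096]);
    assert!(matches!(buffer_data(&mut buf, large), Pending::Chained(_)));
}
```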
@@ -179,16 +193,27 @@ impl<T, B> Sink for FramedWrite<T, B>
    fn poll_complete(&mut self) -> Poll<(), ConnectionError> {
        trace!("poll_complete");

        // TODO: implement
        match self.next {
            Some(Next::Data { .. }) => unimplemented!(),
            _ => {}
        while !self.is_empty() {
            match self.next {
                Some(Next::Data(ref mut frame)) => {
                    let mut buf = self.buf.by_ref().chain(frame.payload_mut());
                    try_ready!(self.inner.write_buf(&mut buf));
                }
                _ => {
                    try_ready!(self.inner.write_buf(&mut self.buf));
                }
            }
        }

        // As long as there is data to write, try to write it!
        while !self.is_empty() {
            trace!("writing {}", self.buf.remaining());
            try_ready!(self.inner.write_buf(&mut self.buf));
        // The data frame has been written, so unset it
        match self.next.take() {
            Some(Next::Data(frame)) => {
                self.last_data_frame = Some(frame);
            }
            Some(Next::Continuation(frame)) => {
                unimplemented!();
            }
            None => {}
        }

        trace!("flushing buffer");

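The new write loop avoids copying a large payload into the intermediate buffer: the buffered frame head and the payload are combined with `Buf::chain` and handed to the socket as one logical buffer, so a vectored write can pick up both. A small sketch of the chaining itself, assuming the `bytes` crate, with the socket write replaced by a copy-out:

```rust
// Sketch of the chained write done in poll_complete above: the frame head
// lives in the shared write buffer, the payload stays in its own buffer,
// and `Buf::chain` presents the two as one contiguous stream of bytes.
use bytes::{Buf, BufMut, Bytes, BytesMut};

fn main() {
    // Already-encoded 9-byte frame head sitting in the write buffer.
    let head_bytes: [u8; 9] = [0, 0, 5, 0, 1, 0, 0, 0, 1];
    let mut head = BytesMut::with_capacity(16);
    head.put_slice(&head_bytes);

    // Large payload kept out of the intermediate buffer entirely.
    let payload = Bytes::from_static(b"hello");

    // In the real code, `write_buf` would consume this chained buffer,
    // ideally as a single vectored write to the socket.
    let mut chained = head.chain(payload);

    let len = chained.remaining();
    let out = chained.copy_to_bytes(len);
    assert_eq!(&out[..9], &head_bytes[..]);
    assert_eq!(&out[9..], b"hello");
}
```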
@@ -6,14 +6,15 @@ mod ping_pong;
mod settings;
mod streams;

pub use self::connection::Connection;
pub use self::streams::{Streams, StreamRef, Chunk};
pub(crate) use self::connection::Connection;
pub(crate) use self::streams::{Streams, StreamRef, Chunk};

use self::codec::Codec;
use self::framed_read::FramedRead;
use self::framed_write::FramedWrite;
use self::ping_pong::PingPong;
use self::settings::Settings;
use self::streams::Prioritized;

use {StreamId, ConnectionError};
use error::Reason;
@@ -63,7 +64,7 @@ pub const MAX_WINDOW_SIZE: WindowSize = ::std::u32::MAX;
/// When the server is performing the handshake, it is able to only send
/// `Settings` frames and is expected to receive the client preface as a byte
/// stream. To represent this, `Settings<FramedWrite<T>>` is returned.
pub fn framed_write<T, B>(io: T) -> FramedWrite<T, B>
pub(crate) fn framed_write<T, B>(io: T) -> FramedWrite<T, B>
    where T: AsyncRead + AsyncWrite,
          B: Buf,
{
@@ -71,7 +72,7 @@ pub fn framed_write<T, B>(io: T) -> FramedWrite<T, B>
}

/// Create a full H2 transport from the server handshaker
pub fn from_framed_write<T, P, B>(framed_write: FramedWrite<T, B::Buf>)
pub(crate) fn from_framed_write<T, P, B>(framed_write: FramedWrite<T, Prioritized<B::Buf>>)
    -> Connection<T, P, B>
    where T: AsyncRead + AsyncWrite,
          P: Peer,

@@ -2,7 +2,7 @@ use {frame, ConnectionError};
use proto::*;

#[derive(Debug)]
pub struct Settings {
pub(crate) struct Settings {
    /// Received SETTINGS frame pending processing. The ACK must be written to
    /// the socket first then the settings applied **before** receiving any
    /// further frames.
@@ -26,12 +26,13 @@ impl Settings {
        }
    }

    pub fn send_pending_ack<T, B>(&mut self,
                                  dst: &mut Codec<T, B>,
                                  streams: &mut Streams<B>)
    pub fn send_pending_ack<T, B, C>(&mut self,
                                     dst: &mut Codec<T, B>,
                                     streams: &mut Streams<C>)
        -> Poll<(), ConnectionError>
        where T: AsyncWrite,
              B: Buf,
              C: Buf,
    {
        if let Some(ref settings) = self.pending {
            let frame = frame::Settings::ack();

@@ -8,7 +8,8 @@ mod store;
mod stream;
mod streams;

pub use self::streams::{Streams, StreamRef, Chunk};
pub(crate) use self::streams::{Streams, StreamRef, Chunk};
pub(crate) use self::prioritize::Prioritized;

use self::buffer::Buffer;
use self::flow_control::FlowControl;

@@ -22,6 +22,17 @@ pub(super) struct Prioritize<B> {
    conn_task: Option<task::Task>,
}

#[derive(Debug)]
pub(crate) struct Prioritized<B> {
    // The buffer
    inner: B,

    // The stream that this is associated with
    stream: store::Key,
}

// ===== impl Prioritize =====

impl<B> Prioritize<B>
    where B: Buf,
{
@@ -61,40 +72,56 @@ impl<B> Prioritize<B>
    pub fn queue_frame(&mut self,
                       frame: Frame<B>,
                       stream: &mut store::Ptr<B>)
    {
        if self.queue_frame2(frame, stream) {
            // Notification required
            if let Some(ref task) = self.conn_task {
                task.notify();
            }
        }
    }

    /// Queue frame without actually notifying. Returns true if the queue was
    /// successful.
    fn queue_frame2(&mut self, frame: Frame<B>, stream: &mut store::Ptr<B>)
        -> bool
    {
        self.buffered_data += frame.flow_len();

        // queue the frame in the buffer
        stream.pending_send.push_back(&mut self.buffer, frame);

        if stream.is_pending_send {
            debug_assert!(!self.pending_send.is_empty());

            // Already queued to have frame processed.
            return;
        }

        // Queue the stream
        push_sender(&mut self.pending_send, stream);
        !push_sender(&mut self.pending_send, stream)
    }

        if let Some(ref task) = self.conn_task {
            task.notify();
        }
    /// Push the frame to the front of the stream's deque, scheduling the
    /// stream if needed.
    fn push_back_frame(&mut self, frame: Frame<B>, stream: &mut store::Ptr<B>) {
        // Push the frame to the front of the stream's deque
        stream.pending_send.push_front(&mut self.buffer, frame);

        // If needed, schedule the sender
        push_sender(&mut self.pending_capacity, stream);
    }

    pub fn poll_complete<T>(&mut self,
                            store: &mut Store<B>,
                            dst: &mut Codec<T, B>)
                            dst: &mut Codec<T, Prioritized<B>>)
        -> Poll<(), ConnectionError>
        where T: AsyncWrite,
    {
        // Track the task
        self.conn_task = Some(task::current());

        // Ensure codec is ready
        try_ready!(dst.poll_ready());

        // Reclaim any frame that has previously been written
        self.reclaim_frame(store, dst);

        trace!("poll_complete");
        loop {
            // Ensure codec is ready
            try_ready!(dst.poll_ready());

            match self.pop_frame(store) {
                Some(frame) => {
                    trace!("writing frame={:?}", frame);
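The split into `queue_frame` / `queue_frame2` exists so the connection task is only woken when a stream goes from idle to queued; re-queuing an already-pending stream should not generate a spurious wakeup. A stripped-down sketch of that notify-only-on-first-queue pattern, with a plain `Vec` and a counter standing in for the store, the intrusive list, and the futures task:

```rust
// Sketch of the "notify only when newly queued" pattern behind
// queue_frame/queue_frame2. These types are stand-ins, not the real
// store::Ptr / task::Task machinery.
struct Stream {
    id: u32,
    is_pending_send: bool,
    pending_frames: Vec<&'static str>,
}

struct Prioritize {
    pending_send: Vec<u32>, // queued stream ids
    wakeups: u32,           // stands in for conn_task.notify()
}

impl Prioritize {
    fn queue_frame(&mut self, frame: &'static str, stream: &mut Stream) {
        if self.queue_frame2(frame, stream) {
            // Notification required: the stream was not queued before.
            self.wakeups += 1;
        }
    }

    /// Queue the frame; returns true if the stream was newly queued and the
    /// connection task therefore needs a wakeup.
    fn queue_frame2(&mut self, frame: &'static str, stream: &mut Stream) -> bool {
        stream.pending_frames.push(frame);

        if stream.is_pending_send {
            // Already scheduled; no extra wakeup needed.
            return false;
        }

        self.pending_send.push(stream.id);
        stream.is_pending_send = true;
        true
    }
}

fn main() {
    let mut p = Prioritize { pending_send: Vec::new(), wakeups: 0 };
    let mut s = Stream { id: 1, is_pending_send: false, pending_frames: Vec::new() };

    p.queue_frame("DATA #1", &mut s);
    p.queue_frame("DATA #2", &mut s);

    // Two frames queued, but only one wakeup and one queue entry.
    assert_eq!(s.pending_frames.len(), 2);
    assert_eq!(p.pending_send, vec![1]);
    assert_eq!(p.wakeups, 1);
}
```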
@@ -106,15 +133,31 @@ impl<B> Prioritize<B>
                    // We already verified that `dst` is ready to accept the
                    // write
                    assert!(res.is_ready());

                    // Ensure the codec is ready to try the loop again.
                    try_ready!(dst.poll_ready());

                    // Because, always try to reclaim...
                    self.reclaim_frame(store, dst);

                }
                None => {
                    // Try to flush the codec.
                    try_ready!(dst.poll_complete());

                    // This might release a data frame...
                    if !self.reclaim_frame(store, dst) {
                        return Ok(().into());
                    }

                    // No need to poll ready as poll_complete() does this for
                    // us...
                }
                None => break,
            }
        }

        Ok(().into())
    }

    fn pop_frame(&mut self, store: &mut Store<B>) -> Option<Frame<B>> {
    fn pop_frame(&mut self, store: &mut Store<B>) -> Option<Frame<Prioritized<B>>> {
        loop {
            match self.pop_sender(store) {
                Some(mut stream) => {
@@ -124,11 +167,7 @@ impl<B> Prioritize<B>

                    if len > self.flow_control.effective_window_size() as usize {
                        // TODO: This could be smarter...
                        stream.pending_send.push_front(&mut self.buffer, frame.into());

                        // Push the stream onto the list of streams
                        // waiting for connection capacity
                        push_sender(&mut self.pending_capacity, &mut stream);
                        self.push_back_frame(frame.into(), &mut stream);

                        // Try again w/ the next stream
                        continue;
@@ -143,6 +182,14 @@ impl<B> Prioritize<B>
                        push_sender(&mut self.pending_send, &mut stream);
                    }

                    // Add prioritization logic
                    let frame = frame.map(|buf| {
                        Prioritized {
                            inner: buf,
                            stream: stream.key(),
                        }
                    });

                    return Some(frame);
                }
                None => return None,
@@ -174,10 +221,58 @@ impl<B> Prioritize<B>
            }
        }
    }

    fn reclaim_frame<T>(&mut self,
                        store: &mut Store<B>,
                        dst: &mut Codec<T, Prioritized<B>>) -> bool
    {
        // First check if there are any data chunks to take back
        if let Some(frame) = dst.take_last_data_frame() {
            let mut stream = store.resolve(frame.payload().stream);

            let frame = frame.map(|prioritized| {
                // TODO: Ensure fully written
                prioritized.inner
            });

            self.push_back_frame(frame.into(), &mut stream);

            true
        } else {
            false
        }
    }
}

fn push_sender<B>(list: &mut store::List<B>, stream: &mut store::Ptr<B>) {
    debug_assert!(!stream.is_pending_send);
/// Push the stream onto the `pending_send` list. Returns true if the sender was
/// not already queued.
fn push_sender<B>(list: &mut store::List<B>, stream: &mut store::Ptr<B>)
    -> bool
{
    if stream.is_pending_send {
        return false;
    }

    list.push::<stream::Next>(stream);
    stream.is_pending_send = true;

    true
}

// ===== impl Prioritized =====

impl<B> Buf for Prioritized<B>
    where B: Buf,
{
    fn remaining(&self) -> usize {
        self.inner.remaining()
    }

    fn bytes(&self) -> &[u8] {
        self.inner.bytes()
    }

    fn advance(&mut self, cnt: usize) {
        self.inner.advance(cnt)
    }
}

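Putting the pieces together: `pop_frame` wraps the payload in `Prioritized` so the codec carries the owning stream's key alongside the bytes, and `reclaim_frame` later unwraps it to return the buffer to that stream. A self-contained sketch of that tag-and-reclaim round trip, with a plain struct standing in for `Prioritized` and a `usize` for `store::Key`:

```rust
// Sketch of the wrap/unwrap cycle around the codec: tag the buffer with the
// stream it belongs to on the way out, recover both on the way back.
// `Tagged` and `StreamKey` are illustrative stand-ins, not the real types.
type StreamKey = usize;

struct Tagged<B> {
    inner: B,
    stream: StreamKey,
}

/// What `pop_frame` does conceptually: attach the stream key to the payload
/// before handing it to the write half.
fn wrap<B>(buf: B, stream: StreamKey) -> Tagged<B> {
    Tagged { inner: buf, stream }
}

/// What `reclaim_frame` does conceptually: recover the stream key and the
/// original buffer once the codec hands the written frame back.
fn unwrap<B>(tagged: Tagged<B>) -> (StreamKey, B) {
    (tagged.stream, tagged.inner)
}

fn main() {
    let payload = vec![1u8, 2, 3];
    let wrapped = wrap(payload, 7);

    // ... the codec writes `wrapped` to the socket, then gives it back ...

    let (stream, payload) = unwrap(wrapped);
    assert_eq!(stream, 7);
    assert_eq!(payload, vec![1, 2, 3]);
}
```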
@@ -353,7 +353,7 @@ impl<B> Recv<B> where B: Buf {
    }

    /// Send any pending refusals.
    pub fn send_pending_refusal<T>(&mut self, dst: &mut Codec<T, B>)
    pub fn send_pending_refusal<T>(&mut self, dst: &mut Codec<T, Prioritized<B>>)
        -> Poll<(), ConnectionError>
        where T: AsyncWrite,
    {

@@ -146,7 +146,7 @@ impl<B> Send<B> where B: Buf {

    pub fn poll_complete<T>(&mut self,
                            store: &mut Store<B>,
                            dst: &mut Codec<T, B>)
                            dst: &mut Codec<T, Prioritized<B>>)
        -> Poll<(), ConnectionError>
        where T: AsyncWrite,
    {
@@ -316,6 +316,8 @@ impl<B> Send<B> where B: Buf {
                } else {
                    stream.unadvertised_send_window -= dec;
                }

                unimplemented!();
            }
        });
    } else if val > old_val {

@@ -8,19 +8,19 @@ use std::sync::{Arc, Mutex};
// TODO: All the VecDeques should become linked lists using the State
// values.
#[derive(Debug)]
pub struct Streams<B> {
pub(crate) struct Streams<B> {
    inner: Arc<Mutex<Inner<B>>>,
}

/// Reference to the stream state
#[derive(Debug)]
pub struct StreamRef<B> {
pub(crate) struct StreamRef<B> {
    inner: Arc<Mutex<Inner<B>>>,
    key: store::Key,
}

#[derive(Debug)]
pub struct Chunk<B>
pub(crate) struct Chunk<B>
    where B: Buf,
{
    inner: Arc<Mutex<Inner<B>>>,
@@ -231,7 +231,7 @@ impl<B> Streams<B>
        Ok(())
    }

    pub fn send_pending_refusal<T>(&mut self, dst: &mut Codec<T, B>)
    pub fn send_pending_refusal<T>(&mut self, dst: &mut Codec<T, Prioritized<B>>)
        -> Poll<(), ConnectionError>
        where T: AsyncWrite,
    {
@@ -240,7 +240,7 @@ impl<B> Streams<B>
        me.actions.recv.send_pending_refusal(dst)
    }

    pub fn poll_complete<T>(&mut self, dst: &mut Codec<T, B>)
    pub fn poll_complete<T>(&mut self, dst: &mut Codec<T, Prioritized<B>>)
        -> Poll<(), ConnectionError>
        where T: AsyncWrite,
    {