feat(http2): add HTTP2 keep-alive support for client and server

This adds HTTP2 keep-alive support to client and server connections
based loosely on gRPC keep-alive. When enabled, if no data has been
received for the configured interval, an HTTP2 PING frame is sent. If
the PING is not acknowledged within the configured timeout, the
connection is closed.

Clients have an additional option to enable keep-alive while the
connection is otherwise idle. When disabled, keep-alive PINGs are only
used while there are open request/response streams. If enabled, PINGs
are sent even when there are no active streams.

For now, since these features use `tokio::time::Delay`, the `runtime`
cargo feature is required to use them.
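
For illustration, enabling these options on a client could look like
the sketch below (hyper 0.13 with the `runtime` feature). The
`http2_keep_alive_*` builder methods live in parts of this changeset
not shown in the hunks below, so their exact names and signatures are
an assumption here. Servers gain the same interval/timeout options,
with keep-alive always active while idle.

    use std::time::Duration;

    // Sketch only: assumes the `http2_keep_alive_*` builder methods
    // added elsewhere in this changeset.
    let client = hyper::Client::builder()
        .http2_only(true)
        // Send a PING after 30s with no frames received...
        .http2_keep_alive_interval(Duration::from_secs(30))
        // ...and close the connection if no pong arrives within 10s.
        .http2_keep_alive_timeout(Duration::from_secs(10))
        // Send PINGs even while no request/response streams are open.
        .http2_keep_alive_while_idle(true)
        .build_http::<hyper::Body>();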
Author: Sean McArthur
Date: 2020-03-20 13:58:52 -07:00
Parent: d838d54fdf
Commit: 9a8413d910
13 changed files with 1166 additions and 255 deletions

src/proto/h2/bdp.rs (deleted)

@@ -1,186 +0,0 @@
// What should it do?
//
// # BDP Algorithm
//
// 1. When receiving a DATA frame, if a BDP ping isn't outstanding:
// 1a. Record current time.
// 1b. Send a BDP ping.
// 2. Increment the number of received bytes.
// 3. When the BDP ping ack is received:
// 3a. Record duration from sent time.
// 3b. Merge RTT with a running average.
// 3c. Calculate bdp as bytes/rtt.
// 3d. If the sample is at least 2/3 of the current BDP, double it
// and update the windows.
//
//
// # Implementation
//
// - `hyper::Body::h2` variant includes a "bdp channel"
// - When the body's `poll_data` yields bytes, call `bdp.sample(bytes.len())`
//
use std::sync::{Arc, Mutex, Weak};
use std::task::{self, Poll};
use std::time::{Duration, Instant};
use h2::{Ping, PingPong};
type WindowSize = u32;
/// Any higher than this likely will be hitting the TCP flow control.
const BDP_LIMIT: usize = 1024 * 1024 * 16;
pub(super) fn disabled() -> Sampler {
Sampler {
shared: Weak::new(),
}
}
pub(super) fn channel(ping_pong: PingPong, initial_window: WindowSize) -> (Sampler, Estimator) {
let shared = Arc::new(Mutex::new(Shared {
bytes: 0,
ping_pong,
ping_sent: false,
sent_at: Instant::now(),
}));
(
Sampler {
shared: Arc::downgrade(&shared),
},
Estimator {
bdp: initial_window,
max_bandwidth: 0.0,
shared,
samples: 0,
rtt: 0.0,
},
)
}
#[derive(Clone)]
pub(crate) struct Sampler {
shared: Weak<Mutex<Shared>>,
}
pub(super) struct Estimator {
shared: Arc<Mutex<Shared>>,
/// Current BDP in bytes
bdp: u32,
/// Largest bandwidth we've seen so far.
max_bandwidth: f64,
/// Count of samples made (ping sent and received)
samples: usize,
/// Round trip time in seconds
rtt: f64,
}
struct Shared {
bytes: usize,
ping_pong: PingPong,
ping_sent: bool,
sent_at: Instant,
}
impl Sampler {
pub(crate) fn sample(&self, bytes: usize) {
let shared = if let Some(shared) = self.shared.upgrade() {
shared
} else {
return;
};
let mut inner = shared.lock().unwrap();
if !inner.ping_sent {
if let Ok(()) = inner.ping_pong.send_ping(Ping::opaque()) {
inner.ping_sent = true;
inner.sent_at = Instant::now();
trace!("sending BDP ping");
} else {
return;
}
}
inner.bytes += bytes;
}
}
impl Estimator {
pub(super) fn poll_estimate(&mut self, cx: &mut task::Context<'_>) -> Poll<WindowSize> {
let mut inner = self.shared.lock().unwrap();
if !inner.ping_sent {
// XXX: this doesn't register a waker...?
return Poll::Pending;
}
let (bytes, rtt) = match ready!(inner.ping_pong.poll_pong(cx)) {
Ok(_pong) => {
let rtt = inner.sent_at.elapsed();
let bytes = inner.bytes;
inner.bytes = 0;
inner.ping_sent = false;
self.samples += 1;
trace!("received BDP ack; bytes = {}, rtt = {:?}", bytes, rtt);
(bytes, rtt)
}
Err(e) => {
debug!("bdp pong error: {}", e);
return Poll::Pending;
}
};
drop(inner);
if let Some(bdp) = self.calculate(bytes, rtt) {
Poll::Ready(bdp)
} else {
// XXX: this doesn't register a waker...?
Poll::Pending
}
}
fn calculate(&mut self, bytes: usize, rtt: Duration) -> Option<WindowSize> {
// No need to do any math if we're at the limit.
if self.bdp as usize == BDP_LIMIT {
return None;
}
// average the rtt
let rtt = seconds(rtt);
if self.samples < 10 {
// Average the first 10 samples
self.rtt += (rtt - self.rtt) / (self.samples as f64);
} else {
self.rtt += (rtt - self.rtt) / 0.9;
}
// calculate the current bandwidth
let bw = (bytes as f64) / (self.rtt * 1.5);
trace!("current bandwidth = {:.1}B/s", bw);
if bw < self.max_bandwidth {
// not a faster bandwidth, so don't update
return None;
} else {
self.max_bandwidth = bw;
}
// if the current `bytes` sample is at least 2/3 the previous
// bdp, increase to double the current sample.
if (bytes as f64) >= (self.bdp as f64) * 0.66 {
self.bdp = (bytes * 2).min(BDP_LIMIT) as WindowSize;
trace!("BDP increased to {}", self.bdp);
Some(self.bdp)
} else {
None
}
}
}
fn seconds(dur: Duration) -> f64 {
const NANOS_PER_SEC: f64 = 1_000_000_000.0;
let secs = dur.as_secs() as f64;
secs + (dur.subsec_nanos() as f64) / NANOS_PER_SEC
}

src/proto/h2/client.rs

@@ -1,10 +1,13 @@
#[cfg(feature = "runtime")]
use std::time::Duration;
use futures_channel::{mpsc, oneshot};
use futures_util::future::{self, Either, FutureExt as _, TryFutureExt as _};
use futures_util::stream::StreamExt as _;
use h2::client::{Builder, SendRequest};
use tokio::io::{AsyncRead, AsyncWrite};
use super::{bdp, decode_content_length, PipeToSendStream, SendBuf};
use super::{decode_content_length, ping, PipeToSendStream, SendBuf};
use crate::body::Payload;
use crate::common::{task, Exec, Future, Never, Pin, Poll};
use crate::headers;
@@ -32,6 +35,12 @@ pub(crate) struct Config {
pub(crate) adaptive_window: bool,
pub(crate) initial_conn_window_size: u32,
pub(crate) initial_stream_window_size: u32,
#[cfg(feature = "runtime")]
pub(crate) keep_alive_interval: Option<Duration>,
#[cfg(feature = "runtime")]
pub(crate) keep_alive_timeout: Duration,
#[cfg(feature = "runtime")]
pub(crate) keep_alive_while_idle: bool,
}
impl Default for Config {
@@ -40,6 +49,12 @@ impl Default for Config {
adaptive_window: false,
initial_conn_window_size: DEFAULT_CONN_WINDOW,
initial_stream_window_size: DEFAULT_STREAM_WINDOW,
#[cfg(feature = "runtime")]
keep_alive_interval: None,
#[cfg(feature = "runtime")]
keep_alive_timeout: Duration::from_secs(20),
#[cfg(feature = "runtime")]
keep_alive_while_idle: false,
}
}
}
@@ -75,16 +90,35 @@ where
}
});
let sampler = if config.adaptive_window {
let (sampler, mut estimator) =
bdp::channel(conn.ping_pong().unwrap(), config.initial_stream_window_size);
let ping_config = ping::Config {
bdp_initial_window: if config.adaptive_window {
Some(config.initial_stream_window_size)
} else {
None
},
#[cfg(feature = "runtime")]
keep_alive_interval: config.keep_alive_interval,
#[cfg(feature = "runtime")]
keep_alive_timeout: config.keep_alive_timeout,
#[cfg(feature = "runtime")]
keep_alive_while_idle: config.keep_alive_while_idle,
};
let ping = if ping_config.is_enabled() {
let pp = conn.ping_pong().expect("conn.ping_pong");
let (recorder, mut ponger) = ping::channel(pp, ping_config);
let conn = future::poll_fn(move |cx| {
match estimator.poll_estimate(cx) {
Poll::Ready(wnd) => {
match ponger.poll(cx) {
Poll::Ready(ping::Ponged::SizeUpdate(wnd)) => {
conn.set_target_window_size(wnd);
conn.set_initial_window_size(wnd)?;
}
#[cfg(feature = "runtime")]
Poll::Ready(ping::Ponged::KeepAliveTimedOut) => {
debug!("connection keep-alive timed out");
return Poll::Ready(Ok(()));
}
Poll::Pending => {}
}
@@ -93,16 +127,16 @@ where
let conn = conn.map_err(|e| debug!("connection error: {}", e));
exec.execute(conn_task(conn, conn_drop_rx, cancel_tx));
sampler
recorder
} else {
let conn = conn.map_err(|e| debug!("connection error: {}", e));
exec.execute(conn_task(conn, conn_drop_rx, cancel_tx));
bdp::disabled()
ping::disabled()
};
Ok(ClientTask {
bdp: sampler,
ping,
conn_drop_ref,
conn_eof,
executor: exec,
@@ -135,7 +169,7 @@ pub(crate) struct ClientTask<B>
where
B: Payload,
{
bdp: bdp::Sampler,
ping: ping::Recorder,
conn_drop_ref: ConnDropRef,
conn_eof: ConnEof,
executor: Exec,
@@ -154,6 +188,7 @@ where
match ready!(self.h2_tx.poll_ready(cx)) {
Ok(()) => (),
Err(err) => {
self.ping.ensure_not_timed_out()?;
return if err.reason() == Some(::h2::Reason::NO_ERROR) {
trace!("connection gracefully shutdown");
Poll::Ready(Ok(Dispatched::Shutdown))
@@ -188,6 +223,7 @@ where
}
};
let ping = self.ping.clone();
if !eos {
let mut pipe = Box::pin(PipeToSendStream::new(body, body_tx)).map(|res| {
if let Err(e) = res {
@@ -201,8 +237,13 @@ where
Poll::Ready(_) => (),
Poll::Pending => {
let conn_drop_ref = self.conn_drop_ref.clone();
// keep the ping recorder's knowledge of an
// "open stream" alive while this body is
// still sending...
let ping = ping.clone();
let pipe = pipe.map(move |x| {
drop(conn_drop_ref);
drop(ping);
x
});
self.executor.execute(pipe);
@@ -210,15 +251,21 @@ where
}
}
let bdp = self.bdp.clone();
let fut = fut.map(move |result| match result {
Ok(res) => {
// record that we got the response headers
ping.record_non_data();
let content_length = decode_content_length(res.headers());
let res =
res.map(|stream| crate::Body::h2(stream, content_length, bdp));
let res = res.map(|stream| {
let ping = ping.for_stream(&stream);
crate::Body::h2(stream, content_length, ping)
});
Ok(res)
}
Err(err) => {
ping.ensure_not_timed_out().map_err(|e| (e, None))?;
debug!("client response error: {}", err);
Err((crate::Error::new_h2(err), None))
}

src/proto/h2/mod.rs

@@ -12,8 +12,8 @@ use crate::body::Payload;
use crate::common::{task, Future, Pin, Poll};
use crate::headers::content_length_parse_all;
pub(crate) mod bdp;
pub(crate) mod client;
pub(crate) mod ping;
pub(crate) mod server;
pub(crate) use self::client::ClientTask;

src/proto/h2/ping.rs (new file, 509 additions)

@@ -0,0 +1,509 @@
/// HTTP2 Ping usage
///
/// hyper uses HTTP2 pings for two purposes:
///
/// 1. Adaptive flow control using BDP
/// 2. Connection keep-alive
///
/// Both cases are optional.
///
/// # BDP Algorithm
///
/// 1. When receiving a DATA frame, if a BDP ping isn't outstanding:
/// 1a. Record current time.
/// 1b. Send a BDP ping.
/// 2. Increment the number of received bytes.
/// 3. When the BDP ping ack is received:
/// 3a. Record duration from sent time.
/// 3b. Merge RTT with a running average.
/// 3c. Calculate bdp as bytes/rtt.
/// 3d. If the sample is at least 2/3 of the current BDP, double it
/// and update the windows.
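///
/// Illustrative example (hypothetical numbers, not from the source):
/// with the BDP at 128 KB, receiving 128 KB of DATA during one 100ms
/// ping round trip estimates bandwidth as 131_072 / (0.1 * 1.5)
/// bytes/s (about 0.87 MB/s); since the sample is at least 2/3 of the
/// current BDP, the BDP doubles to 256 KB and the connection and
/// stream windows are updated to match.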
#[cfg(feature = "runtime")]
use std::fmt;
#[cfg(feature = "runtime")]
use std::future::Future;
#[cfg(feature = "runtime")]
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::{self, Poll};
use std::time::Duration;
#[cfg(not(feature = "runtime"))]
use std::time::Instant;
use h2::{Ping, PingPong};
#[cfg(feature = "runtime")]
use tokio::time::{Delay, Instant};
type WindowSize = u32;
pub(super) fn disabled() -> Recorder {
Recorder { shared: None }
}
pub(super) fn channel(ping_pong: PingPong, config: Config) -> (Recorder, Ponger) {
debug_assert!(
config.is_enabled(),
"ping channel requires bdp or keep-alive config",
);
let bdp = config.bdp_initial_window.map(|wnd| Bdp {
bdp: wnd,
max_bandwidth: 0.0,
samples: 0,
rtt: 0.0,
});
let bytes = bdp.as_ref().map(|_| 0);
#[cfg(feature = "runtime")]
let keep_alive = config.keep_alive_interval.map(|interval| KeepAlive {
interval,
timeout: config.keep_alive_timeout,
while_idle: config.keep_alive_while_idle,
timer: tokio::time::delay_for(interval),
state: KeepAliveState::Init,
});
#[cfg(feature = "runtime")]
let last_read_at = keep_alive.as_ref().map(|_| Instant::now());
let shared = Arc::new(Mutex::new(Shared {
bytes,
#[cfg(feature = "runtime")]
last_read_at,
#[cfg(feature = "runtime")]
is_keep_alive_timed_out: false,
ping_pong,
ping_sent_at: None,
}));
(
Recorder {
shared: Some(shared.clone()),
},
Ponger {
bdp,
#[cfg(feature = "runtime")]
keep_alive,
shared,
},
)
}
#[derive(Clone)]
pub(super) struct Config {
pub(super) bdp_initial_window: Option<WindowSize>,
/// If no frames are received in this amount of time, a PING frame is sent.
#[cfg(feature = "runtime")]
pub(super) keep_alive_interval: Option<Duration>,
/// After sending a keepalive PING, the connection will be closed if
/// a pong is not received in this amount of time.
#[cfg(feature = "runtime")]
pub(super) keep_alive_timeout: Duration,
/// If true, sends pings even when there are no active streams.
#[cfg(feature = "runtime")]
pub(super) keep_alive_while_idle: bool,
}
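/// A shared handle that records connection activity (DATA and other
/// frames read) into the ping state. A clone travels with each h2
/// stream body, which also lets the `Ponger` detect open streams.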
#[derive(Clone)]
pub(crate) struct Recorder {
shared: Option<Arc<Mutex<Shared>>>,
}
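/// Driven from the connection task: sends BDP and keep-alive pings,
/// processes pongs, and reports window updates or keep-alive timeouts
/// back to the connection.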
pub(super) struct Ponger {
bdp: Option<Bdp>,
#[cfg(feature = "runtime")]
keep_alive: Option<KeepAlive>,
shared: Arc<Mutex<Shared>>,
}
struct Shared {
ping_pong: PingPong,
ping_sent_at: Option<Instant>,
// bdp
/// If `Some`, bdp is enabled, and this tracks how many bytes have been
/// read during the current sample.
bytes: Option<usize>,
// keep-alive
/// If `Some`, keep-alive is enabled, and the Instant is how long ago
/// the connection read the last frame.
#[cfg(feature = "runtime")]
last_read_at: Option<Instant>,
#[cfg(feature = "runtime")]
is_keep_alive_timed_out: bool,
}
struct Bdp {
/// Current BDP in bytes
bdp: u32,
/// Largest bandwidth we've seen so far.
max_bandwidth: f64,
/// Count of samples made (ping sent and received)
samples: usize,
/// Round trip time in seconds
rtt: f64,
}
#[cfg(feature = "runtime")]
struct KeepAlive {
/// If no frames are received in this amount of time, a PING frame is sent.
interval: Duration,
/// After sending a keepalive PING, the connection will be closed if
/// a pong is not received in this amount of time.
timeout: Duration,
/// If true, sends pings even when there are no active streams.
while_idle: bool,
state: KeepAliveState,
timer: Delay,
}
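/// Keep-alive state machine: `Init` (nothing scheduled) arms the
/// timer for `last_read_at + interval` and becomes `Scheduled`; if
/// the timer fires with no intervening read, a PING is sent, the
/// timer is re-armed with the timeout, and the state becomes
/// `PingSent`. Timer expiry in `PingSent` before a pong means the
/// keep-alive timed out.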
#[cfg(feature = "runtime")]
enum KeepAliveState {
Init,
Scheduled,
PingSent,
}
pub(super) enum Ponged {
SizeUpdate(WindowSize),
#[cfg(feature = "runtime")]
KeepAliveTimedOut,
}
#[cfg(feature = "runtime")]
#[derive(Debug)]
pub(super) struct KeepAliveTimedOut;
// ===== impl Config =====
impl Config {
pub(super) fn is_enabled(&self) -> bool {
#[cfg(feature = "runtime")]
{
self.bdp_initial_window.is_some() || self.keep_alive_interval.is_some()
}
#[cfg(not(feature = "runtime"))]
{
self.bdp_initial_window.is_some()
}
}
}
// ===== impl Recorder =====
impl Recorder {
pub(crate) fn record_data(&self, len: usize) {
let shared = if let Some(ref shared) = self.shared {
shared
} else {
return;
};
let mut locked = shared.lock().unwrap();
#[cfg(feature = "runtime")]
locked.update_last_read_at();
if let Some(ref mut bytes) = locked.bytes {
*bytes += len;
} else {
// no need to send bdp ping if bdp is disabled
return;
}
if !locked.is_ping_sent() {
locked.send_ping();
}
}
pub(crate) fn record_non_data(&self) {
#[cfg(feature = "runtime")]
{
let shared = if let Some(ref shared) = self.shared {
shared
} else {
return;
};
let mut locked = shared.lock().unwrap();
locked.update_last_read_at();
}
}
/// If the incoming stream is already closed, convert self into
/// a disabled reporter.
pub(super) fn for_stream(self, stream: &h2::RecvStream) -> Self {
if stream.is_end_stream() {
disabled()
} else {
self
}
}
pub(super) fn ensure_not_timed_out(&self) -> crate::Result<()> {
#[cfg(feature = "runtime")]
{
if let Some(ref shared) = self.shared {
let locked = shared.lock().unwrap();
if locked.is_keep_alive_timed_out {
return Err(KeepAliveTimedOut.crate_error());
}
}
}
// else
Ok(())
}
}
// ===== impl Ponger =====
impl Ponger {
pub(super) fn poll(&mut self, cx: &mut task::Context<'_>) -> Poll<Ponged> {
let mut locked = self.shared.lock().unwrap();
#[cfg(feature = "runtime")]
let is_idle = self.is_idle();
#[cfg(feature = "runtime")]
{
if let Some(ref mut ka) = self.keep_alive {
ka.schedule(is_idle, &locked);
ka.maybe_ping(cx, &mut locked);
}
}
if !locked.is_ping_sent() {
// XXX: this doesn't register a waker...?
return Poll::Pending;
}
let (bytes, rtt) = match locked.ping_pong.poll_pong(cx) {
Poll::Ready(Ok(_pong)) => {
let rtt = locked
.ping_sent_at
.expect("pong received implies ping_sent_at")
.elapsed();
locked.ping_sent_at = None;
trace!("recv pong");
#[cfg(feature = "runtime")]
{
if let Some(ref mut ka) = self.keep_alive {
locked.update_last_read_at();
ka.schedule(is_idle, &locked);
}
}
if let Some(ref mut bdp) = self.bdp {
let bytes = locked.bytes.expect("bdp enabled implies bytes");
locked.bytes = Some(0); // reset
bdp.samples += 1;
trace!("received BDP ack; bytes = {}, rtt = {:?}", bytes, rtt);
(bytes, rtt)
} else {
// no bdp, done!
return Poll::Pending;
}
}
Poll::Ready(Err(e)) => {
debug!("pong error: {}", e);
return Poll::Pending;
}
Poll::Pending => {
#[cfg(feature = "runtime")]
{
if let Some(ref mut ka) = self.keep_alive {
if let Err(KeepAliveTimedOut) = ka.maybe_timeout(cx) {
self.keep_alive = None;
locked.is_keep_alive_timed_out = true;
return Poll::Ready(Ponged::KeepAliveTimedOut);
}
}
}
return Poll::Pending;
}
};
drop(locked);
if let Some(bdp) = self.bdp.as_mut().and_then(|bdp| bdp.calculate(bytes, rtt)) {
Poll::Ready(Ponged::SizeUpdate(bdp))
} else {
// XXX: this doesn't register a waker...?
Poll::Pending
}
}
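/// The shared state is referenced by this `Ponger`, by the connection
/// task's `Recorder`, and by a `Recorder` clone in each open stream's
/// body, so a strong count of two or fewer means no streams are open.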
#[cfg(feature = "runtime")]
fn is_idle(&self) -> bool {
Arc::strong_count(&self.shared) <= 2
}
}
// ===== impl Shared =====
impl Shared {
fn send_ping(&mut self) {
match self.ping_pong.send_ping(Ping::opaque()) {
Ok(()) => {
self.ping_sent_at = Some(Instant::now());
trace!("sent ping");
}
Err(err) => {
debug!("error sending ping: {}", err);
}
}
}
fn is_ping_sent(&self) -> bool {
self.ping_sent_at.is_some()
}
#[cfg(feature = "runtime")]
fn update_last_read_at(&mut self) {
if self.last_read_at.is_some() {
self.last_read_at = Some(Instant::now());
}
}
#[cfg(feature = "runtime")]
fn last_read_at(&self) -> Instant {
self.last_read_at.expect("keep_alive expects last_read_at")
}
}
// ===== impl Bdp =====
/// Any higher than this likely will be hitting the TCP flow control.
const BDP_LIMIT: usize = 1024 * 1024 * 16;
impl Bdp {
fn calculate(&mut self, bytes: usize, rtt: Duration) -> Option<WindowSize> {
// No need to do any math if we're at the limit.
if self.bdp as usize == BDP_LIMIT {
return None;
}
// average the rtt
let rtt = seconds(rtt);
if self.samples < 10 {
// Average the first 10 samples
self.rtt += (rtt - self.rtt) / (self.samples as f64);
} else {
self.rtt += (rtt - self.rtt) / 0.9;
}
// calculate the current bandwidth
let bw = (bytes as f64) / (self.rtt * 1.5);
trace!("current bandwidth = {:.1}B/s", bw);
if bw < self.max_bandwidth {
// not a faster bandwidth, so don't update
return None;
} else {
self.max_bandwidth = bw;
}
// if the current `bytes` sample is at least 2/3 the previous
// bdp, increase to double the current sample.
if (bytes as f64) >= (self.bdp as f64) * 0.66 {
self.bdp = (bytes * 2).min(BDP_LIMIT) as WindowSize;
trace!("BDP increased to {}", self.bdp);
Some(self.bdp)
} else {
None
}
}
}
fn seconds(dur: Duration) -> f64 {
const NANOS_PER_SEC: f64 = 1_000_000_000.0;
let secs = dur.as_secs() as f64;
secs + (dur.subsec_nanos() as f64) / NANOS_PER_SEC
}
// ===== impl KeepAlive =====
#[cfg(feature = "runtime")]
impl KeepAlive {
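/// Arms the interval timer for `last_read_at + interval`, unless the
/// connection is idle and `while_idle` is disabled.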
fn schedule(&mut self, is_idle: bool, shared: &Shared) {
match self.state {
KeepAliveState::Init => {
if !self.while_idle && is_idle {
return;
}
self.state = KeepAliveState::Scheduled;
let interval = shared.last_read_at() + self.interval;
self.timer.reset(interval);
}
KeepAliveState::PingSent => {
// If the previous ping was answered, begin a new interval
// measured from the last read; otherwise keep waiting.
if shared.is_ping_sent() {
return;
}
self.state = KeepAliveState::Scheduled;
let interval = shared.last_read_at() + self.interval;
self.timer.reset(interval);
}
KeepAliveState::Scheduled => (),
}
}
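/// Once the interval timer fires, sends a keep-alive PING and re-arms
/// the timer with the timeout; if a frame was read while scheduled,
/// reschedules instead of pinging.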
fn maybe_ping(&mut self, cx: &mut task::Context<'_>, shared: &mut Shared) {
match self.state {
KeepAliveState::Scheduled => {
if Pin::new(&mut self.timer).poll(cx).is_pending() {
return;
}
// check if we've received a frame while we were scheduled
if shared.last_read_at() + self.interval > self.timer.deadline() {
self.state = KeepAliveState::Init;
cx.waker().wake_by_ref(); // schedule us again
return;
}
trace!("keep-alive interval ({:?}) reached", self.interval);
shared.send_ping();
self.state = KeepAliveState::PingSent;
let timeout = Instant::now() + self.timeout;
self.timer.reset(timeout);
}
KeepAliveState::Init | KeepAliveState::PingSent => (),
}
}
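/// After a PING has been sent, reports `KeepAliveTimedOut` if the
/// timeout elapses before the pong is received.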
fn maybe_timeout(&mut self, cx: &mut task::Context<'_>) -> Result<(), KeepAliveTimedOut> {
match self.state {
KeepAliveState::PingSent => {
if Pin::new(&mut self.timer).poll(cx).is_pending() {
return Ok(());
}
trace!("keep-alive timeout ({:?}) reached", self.timeout);
Err(KeepAliveTimedOut)
}
KeepAliveState::Init | KeepAliveState::Scheduled => Ok(()),
}
}
}
// ===== impl KeepAliveTimedOut =====
#[cfg(feature = "runtime")]
impl KeepAliveTimedOut {
pub(super) fn crate_error(self) -> crate::Error {
crate::Error::new(crate::error::Kind::Http2).with(self)
}
}
#[cfg(feature = "runtime")]
impl fmt::Display for KeepAliveTimedOut {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("keep-alive timed out")
}
}
#[cfg(feature = "runtime")]
impl std::error::Error for KeepAliveTimedOut {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
Some(&crate::error::TimedOut)
}
}

src/proto/h2/server.rs

@@ -1,12 +1,14 @@
use std::error::Error as StdError;
use std::marker::Unpin;
#[cfg(feature = "runtime")]
use std::time::Duration;
use h2::server::{Connection, Handshake, SendResponse};
use h2::Reason;
use pin_project::{pin_project, project};
use tokio::io::{AsyncRead, AsyncWrite};
use super::{bdp, decode_content_length, PipeToSendStream, SendBuf};
use super::{decode_content_length, ping, PipeToSendStream, SendBuf};
use crate::body::Payload;
use crate::common::exec::H2Exec;
use crate::common::{task, Future, Pin, Poll};
@@ -31,6 +33,10 @@ pub(crate) struct Config {
pub(crate) initial_conn_window_size: u32,
pub(crate) initial_stream_window_size: u32,
pub(crate) max_concurrent_streams: Option<u32>,
#[cfg(feature = "runtime")]
pub(crate) keep_alive_interval: Option<Duration>,
#[cfg(feature = "runtime")]
pub(crate) keep_alive_timeout: Duration,
}
impl Default for Config {
@@ -40,6 +46,10 @@ impl Default for Config {
initial_conn_window_size: DEFAULT_CONN_WINDOW,
initial_stream_window_size: DEFAULT_STREAM_WINDOW,
max_concurrent_streams: None,
#[cfg(feature = "runtime")]
keep_alive_interval: None,
#[cfg(feature = "runtime")]
keep_alive_timeout: Duration::from_secs(20),
}
}
}
@@ -60,10 +70,7 @@ where
B: Payload,
{
Handshaking {
/// If Some, bdp is enabled with the initial size.
///
/// If None, bdp is disabled.
bdp_initial_size: Option<u32>,
ping_config: ping::Config,
hs: Handshake<T, SendBuf<B::Data>>,
},
Serving(Serving<T, B>),
@@ -74,7 +81,7 @@ struct Serving<T, B>
where
B: Payload,
{
bdp: Option<(bdp::Sampler, bdp::Estimator)>,
ping: Option<(ping::Recorder, ping::Ponger)>,
conn: Connection<T, SendBuf<B::Data>>,
closing: Option<crate::Error>,
}
@@ -103,10 +110,22 @@ where
None
};
let ping_config = ping::Config {
bdp_initial_window: bdp,
#[cfg(feature = "runtime")]
keep_alive_interval: config.keep_alive_interval,
#[cfg(feature = "runtime")]
keep_alive_timeout: config.keep_alive_timeout,
// If keep-alive is enabled for servers, it is always enabled while
// idle as well, so dead connections can be closed more aggressively.
#[cfg(feature = "runtime")]
keep_alive_while_idle: true,
};
Server {
exec,
state: State::Handshaking {
bdp_initial_size: bdp,
ping_config,
hs: handshake,
},
service,
@@ -149,13 +168,17 @@ where
let next = match me.state {
State::Handshaking {
ref mut hs,
ref bdp_initial_size,
ref ping_config,
} => {
let mut conn = ready!(Pin::new(hs).poll(cx).map_err(crate::Error::new_h2))?;
let bdp = bdp_initial_size
.map(|wnd| bdp::channel(conn.ping_pong().expect("ping_pong"), wnd));
let ping = if ping_config.is_enabled() {
let pp = conn.ping_pong().expect("conn.ping_pong");
Some(ping::channel(pp, ping_config.clone()))
} else {
None
};
State::Serving(Serving {
bdp,
ping,
conn,
closing: None,
})
@@ -193,7 +216,7 @@ where
{
if self.closing.is_none() {
loop {
self.poll_bdp(cx);
self.poll_ping(cx);
// Check that the service is ready to accept a new request.
//
@@ -231,14 +254,16 @@ where
Some(Ok((req, respond))) => {
trace!("incoming request");
let content_length = decode_content_length(req.headers());
let bdp_sampler = self
.bdp
let ping = self
.ping
.as_ref()
.map(|bdp| bdp.0.clone())
.unwrap_or_else(bdp::disabled);
.map(|ping| ping.0.clone())
.unwrap_or_else(ping::disabled);
let req =
req.map(|stream| crate::Body::h2(stream, content_length, bdp_sampler));
// Record the headers received
ping.record_non_data();
let req = req.map(|stream| crate::Body::h2(stream, content_length, ping));
let fut = H2Stream::new(service.call(req), respond);
exec.execute_h2stream(fut);
}
@@ -247,6 +272,10 @@ where
}
None => {
// no more incoming streams...
if let Some((ref ping, _)) = self.ping {
ping.ensure_not_timed_out()?;
}
trace!("incoming connection complete");
return Poll::Ready(Ok(()));
}
@@ -264,13 +293,18 @@ where
Poll::Ready(Err(self.closing.take().expect("polled after error")))
}
fn poll_bdp(&mut self, cx: &mut task::Context<'_>) {
if let Some((_, ref mut estimator)) = self.bdp {
match estimator.poll_estimate(cx) {
Poll::Ready(wnd) => {
fn poll_ping(&mut self, cx: &mut task::Context<'_>) {
if let Some((_, ref mut estimator)) = self.ping {
match estimator.poll(cx) {
Poll::Ready(ping::Ponged::SizeUpdate(wnd)) => {
self.conn.set_target_window_size(wnd);
let _ = self.conn.set_initial_window_size(wnd);
}
#[cfg(feature = "runtime")]
Poll::Ready(ping::Ponged::KeepAliveTimedOut) => {
debug!("keep-alive timed out, closing connection");
self.conn.abrupt_shutdown(h2::Reason::NO_ERROR);
}
Poll::Pending => {}
}
}