start adding tracing spans to internals (#478)
We've adopted `tracing` for diagnostics, but currently it is just being used as a drop-in replacement for the `log` crate. Ideally, we would want to start emitting more structured diagnostics, using `tracing`'s `Span`s and structured key-value fields.

A lot of the logging in `h2` is already written in a style that imitates the formatting of structured key-value logs, but as textual log messages, so migrating those logs to structured `tracing` events is fairly straightforward. I've also started adding spans, mostly in the read path.

Finally, I've updated the tests to use `tracing` rather than `env_logger`. The tracing setup happens in a macro, so that a span carrying each test's name can be generated and entered. This makes the test output easier to read when multiple tests are run concurrently with `--nocapture`.

Signed-off-by: Eliza Weisman <eliza@buoyant.io>
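The pattern repeated throughout the diff below is: replace a textual message that merely imitates key-value structure with a real structured event, and emit it from inside an entered span. A minimal sketch of that pattern, using a placeholder `Frame` type and field names rather than `h2`'s real internals, and assuming the `tracing` and `tracing-subscriber` crates as dependencies:

    use tracing::{debug_span, trace};

    // Placeholder stand-in for a decoded frame; not h2's real type.
    #[derive(Debug)]
    struct Frame {
        stream_id: u32,
    }

    fn recv_frame(frame: &Frame) {
        // A span carrying connection-level context. Every event emitted
        // while the `_e` guard is alive is recorded inside this span.
        let span = debug_span!("Connection", peer = "Client");
        let _e = span.enter();

        // Before: trace!("recv HEADERS; frame={:?}", frame);
        // After: the frame is attached as a structured field instead of
        // being formatted into the message text.
        trace!(frame = ?frame, "recv HEADERS");
    }

    fn main() {
        // Any subscriber works; this one prints events to stdout.
        tracing_subscriber::fmt()
            .with_max_level(tracing::Level::TRACE)
            .init();
        recv_frame(&Frame { stream_id: 1 });
    }

With a `fmt`-style subscriber, an entered span's name and fields are printed as a prefix on every event recorded inside it, which is what the per-connection `span` field added below provides.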
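The test changes follow the same idea: a macro wraps each test so that a span named after the test is created and entered before the body runs, keeping interleaved `--nocapture` output attributable. A rough sketch of what such a macro could look like; the macro name and subscriber setup here are illustrative, not the exact macro added by this change:

    // Hypothetical wrapper: defines a #[test] fn whose body runs inside a
    // span that records the test's name.
    macro_rules! traced_test {
        ($name:ident, $body:block) => {
            #[test]
            fn $name() {
                // Ignore the error if another test already set a subscriber.
                tracing_subscriber::fmt().with_test_writer().try_init().ok();
                let span = tracing::info_span!("test", name = stringify!($name));
                let _e = span.enter();
                $body
            }
        };
    }

    traced_test!(ping_pong_roundtrip, {
        tracing::info!("this event is recorded inside the test's span");
    });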
@@ -44,6 +44,9 @@ where
 /// Stream state handler
 streams: Streams<B, P>,

+/// A `tracing` span tracking the lifetime of the connection.
+span: tracing::Span,
+
 /// Client or server
 _phantom: PhantomData<P>,
 }
@@ -100,6 +103,7 @@ where
 ping_pong: PingPong::new(),
 settings: Settings::new(config.settings),
 streams,
+span: tracing::debug_span!("Connection", peer = %P::NAME),
 _phantom: PhantomData,
 }
 }
@@ -121,6 +125,9 @@ where
 /// Returns `RecvError` as this may raise errors that are caused by delayed
 /// processing of received frames.
 fn poll_ready(&mut self, cx: &mut Context) -> Poll<Result<(), RecvError>> {
+let _e = self.span.enter();
+let span = tracing::trace_span!("poll_ready");
+let _e = span.enter();
 // The order of these calls don't really matter too much
 ready!(self.ping_pong.send_pending_pong(cx, &mut self.codec))?;
 ready!(self.ping_pong.send_pending_ping(cx, &mut self.codec))?;
@@ -200,9 +207,18 @@ where

 /// Advances the internal state of the connection.
 pub fn poll(&mut self, cx: &mut Context) -> Poll<Result<(), proto::Error>> {
+// XXX(eliza): cloning the span is unfortunately necessary here in
+// order to placate the borrow checker — `self` is mutably borrowed by
+// `poll2`, which means that we can't borrow `self.span` to enter it.
+// The clone is just an atomic ref bump.
+let span = self.span.clone();
+let _e = span.enter();
+let span = tracing::trace_span!("poll");
+let _e = span.enter();
 use crate::codec::RecvError::*;

 loop {
+tracing::trace!(connection.state = ?self.state);
 // TODO: probably clean up this glob of code
 match self.state {
 // When open, continue to poll a frame
@@ -230,7 +246,7 @@ where
 // error. This is handled by setting a GOAWAY frame followed by
 // terminating the connection.
 Poll::Ready(Err(Connection(e))) => {
-tracing::debug!("Connection::poll; connection error={:?}", e);
+tracing::debug!(error = ?e, "Connection::poll; connection error");

 // We may have already sent a GOAWAY for this error,
 // if so, don't send another, just flush and close up.
@@ -250,7 +266,7 @@ where
 // This is handled by resetting the frame then trying to read
 // another frame.
 Poll::Ready(Err(Stream { id, reason })) => {
-tracing::trace!("stream error; id={:?}; reason={:?}", id, reason);
+tracing::trace!(?id, ?reason, "stream error");
 self.streams.send_reset(id, reason);
 }
 // Attempting to read a frame resulted in an I/O error. All
@@ -258,7 +274,7 @@ where
 //
 // TODO: Are I/O errors recoverable?
 Poll::Ready(Err(Io(e))) => {
-tracing::debug!("Connection::poll; IO error={:?}", e);
+tracing::debug!(error = ?e, "Connection::poll; IO error");
 let e = e.into();

 // Reset all active streams
@@ -317,28 +333,28 @@ where

 match ready!(Pin::new(&mut self.codec).poll_next(cx)?) {
 Some(Headers(frame)) => {
-tracing::trace!("recv HEADERS; frame={:?}", frame);
+tracing::trace!(?frame, "recv HEADERS");
 self.streams.recv_headers(frame)?;
 }
 Some(Data(frame)) => {
-tracing::trace!("recv DATA; frame={:?}", frame);
+tracing::trace!(?frame, "recv DATA");
 self.streams.recv_data(frame)?;
 }
 Some(Reset(frame)) => {
-tracing::trace!("recv RST_STREAM; frame={:?}", frame);
+tracing::trace!(?frame, "recv RST_STREAM");
 self.streams.recv_reset(frame)?;
 }
 Some(PushPromise(frame)) => {
-tracing::trace!("recv PUSH_PROMISE; frame={:?}", frame);
+tracing::trace!(?frame, "recv PUSH_PROMISE");
 self.streams.recv_push_promise(frame)?;
 }
 Some(Settings(frame)) => {
-tracing::trace!("recv SETTINGS; frame={:?}", frame);
+tracing::trace!(?frame, "recv SETTINGS");
 self.settings
 .recv_settings(frame, &mut self.codec, &mut self.streams)?;
 }
 Some(GoAway(frame)) => {
-tracing::trace!("recv GOAWAY; frame={:?}", frame);
+tracing::trace!(?frame, "recv GOAWAY");
 // This should prevent starting new streams,
 // but should allow continuing to process current streams
 // until they are all EOS. Once they are, State should
@@ -347,7 +363,7 @@ where
 self.error = Some(frame.reason());
 }
 Some(Ping(frame)) => {
-tracing::trace!("recv PING; frame={:?}", frame);
+tracing::trace!(?frame, "recv PING");
 let status = self.ping_pong.recv_ping(frame);
 if status.is_shutdown() {
 assert!(
@@ -360,11 +376,11 @@ where
 }
 }
 Some(WindowUpdate(frame)) => {
-tracing::trace!("recv WINDOW_UPDATE; frame={:?}", frame);
+tracing::trace!(?frame, "recv WINDOW_UPDATE");
 self.streams.recv_window_update(frame)?;
 }
 Some(Priority(frame)) => {
-tracing::trace!("recv PRIORITY; frame={:?}", frame);
+tracing::trace!(?frame, "recv PRIORITY");
 // TODO: handle
 }
 None => {
@@ -11,6 +11,7 @@ use std::fmt;
 pub(crate) trait Peer {
 /// Message type polled from the transport
 type Poll: fmt::Debug;
+const NAME: &'static str;

 fn r#dyn() -> Dyn;

@@ -104,6 +104,8 @@ impl Prioritize {
 stream: &mut store::Ptr,
 task: &mut Option<Waker>,
 ) {
+let span = tracing::trace_span!("Prioritize::queue_frame", ?stream.id);
+let _e = span.enter();
 // Queue the frame in the buffer
 stream.pending_send.push_back(buffer, frame);
 self.schedule_send(stream, task);
@@ -112,7 +114,7 @@ impl Prioritize {
 pub fn schedule_send(&mut self, stream: &mut store::Ptr, task: &mut Option<Waker>) {
 // If the stream is waiting to be opened, nothing more to do.
 if stream.is_send_ready() {
-tracing::trace!("schedule_send; {:?}", stream.id);
+tracing::trace!(?stream.id, "schedule_send");
 // Queue the stream
 self.pending_send.push(stream);

@@ -158,12 +160,10 @@ impl Prioritize {
 // Update the buffered data counter
 stream.buffered_send_data += sz;

-tracing::trace!(
-"send_data; sz={}; buffered={}; requested={}",
-sz,
-stream.buffered_send_data,
-stream.requested_send_capacity
-);
+let span =
+tracing::trace_span!("send_data", sz, requested = stream.requested_send_capacity);
+let _e = span.enter();
+tracing::trace!(buffered = stream.buffered_send_data);

 // Implicitly request more send capacity if not enough has been
 // requested yet.
@@ -180,9 +180,8 @@ impl Prioritize {
 }

 tracing::trace!(
-"send_data (2); available={}; buffered={}",
-stream.send_flow.available(),
-stream.buffered_send_data
+available = %stream.send_flow.available(),
+buffered = stream.buffered_send_data,
 );

 // The `stream.buffered_send_data == 0` check is here so that, if a zero
@@ -214,13 +213,14 @@ impl Prioritize {
 stream: &mut store::Ptr,
 counts: &mut Counts,
 ) {
-tracing::trace!(
-"reserve_capacity; stream={:?}; requested={:?}; effective={:?}; curr={:?}",
-stream.id,
-capacity,
-capacity + stream.buffered_send_data,
-stream.requested_send_capacity
+let span = tracing::trace_span!(
+"reserve_capacity",
+?stream.id,
+requested = capacity,
+effective = capacity + stream.buffered_send_data,
+curr = stream.requested_send_capacity
 );
+let _e = span.enter();

 // Actual capacity is `capacity` + the current amount of buffered data.
 // If it were less, then we could never send out the buffered data.
@@ -266,13 +266,14 @@ impl Prioritize {
 inc: WindowSize,
 stream: &mut store::Ptr,
 ) -> Result<(), Reason> {
-tracing::trace!(
-"recv_stream_window_update; stream={:?}; state={:?}; inc={}; flow={:?}",
-stream.id,
-stream.state,
+let span = tracing::trace_span!(
+"recv_stream_window_update",
+?stream.id,
+?stream.state,
 inc,
-stream.send_flow
+flow = ?stream.send_flow
 );
+let _e = span.enter();

 if stream.state.is_send_closed() && stream.buffered_send_data == 0 {
 // We can't send any data, so don't bother doing anything else.
@@ -324,9 +325,11 @@ impl Prioritize {
 }

 pub fn clear_pending_capacity(&mut self, store: &mut Store, counts: &mut Counts) {
+let span = tracing::trace_span!("clear_pending_capacity");
+let _e = span.enter();
 while let Some(stream) = self.pending_capacity.pop(store) {
 counts.transition(stream, |_, stream| {
-tracing::trace!("clear_pending_capacity; stream={:?}", stream.id);
+tracing::trace!(?stream.id, "clear_pending_capacity");
 })
 }
 }
@@ -339,7 +342,8 @@ impl Prioritize {
 ) where
 R: Resolve,
 {
-tracing::trace!("assign_connection_capacity; inc={}", inc);
+let span = tracing::trace_span!("assign_connection_capacity", inc);
+let _e = span.enter();

 self.flow.assign_capacity(inc);

@@ -382,15 +386,14 @@ impl Prioritize {
 // Can't assign more than what is available
 stream.send_flow.window_size() - stream.send_flow.available().as_size(),
 );
-
+let span = tracing::trace_span!("try_assign_capacity", ?stream.id);
+let _e = span.enter();
 tracing::trace!(
-"try_assign_capacity; stream={:?}, requested={}; additional={}; buffered={}; window={}; conn={}",
-stream.id,
-total_requested,
+requested = total_requested,
 additional,
-stream.buffered_send_data,
-stream.send_flow.window_size(),
-self.flow.available()
+buffered = stream.buffered_send_data,
+window = stream.send_flow.window_size(),
+conn = %self.flow.available()
 );

 if additional == 0 {
@@ -416,7 +419,7 @@ impl Prioritize {
 // TODO: Should prioritization factor into this?
 let assign = cmp::min(conn_available, additional);

-tracing::trace!(" assigning; stream={:?}, capacity={}", stream.id, assign,);
+tracing::trace!(capacity = assign, "assigning");

 // Assign the capacity to the stream
 stream.assign_capacity(assign);
@@ -426,11 +429,10 @@ impl Prioritize {
 }

 tracing::trace!(
-"try_assign_capacity(2); available={}; requested={}; buffered={}; has_unavailable={:?}",
-stream.send_flow.available(),
-stream.requested_send_capacity,
-stream.buffered_send_data,
-stream.send_flow.has_unavailable()
+available = %stream.send_flow.available(),
+requested = stream.requested_send_capacity,
+buffered = stream.buffered_send_data,
+has_unavailable = %stream.send_flow.has_unavailable()
 );

 if stream.send_flow.available() < stream.requested_send_capacity
@@ -492,7 +494,7 @@ impl Prioritize {

 match self.pop_frame(buffer, store, max_frame_len, counts) {
 Some(frame) => {
-tracing::trace!("writing frame={:?}", frame);
+tracing::trace!(?frame, "writing");

 debug_assert_eq!(self.in_flight_data_frame, InFlightData::Nothing);
 if let Frame::Data(ref frame) = frame {
@@ -538,14 +540,15 @@ impl Prioritize {
 where
 B: Buf,
 {
-tracing::trace!("try reclaim frame");
+let span = tracing::trace_span!("try_reclaim_frame");
+let _e = span.enter();

 // First check if there are any data chunks to take back
 if let Some(frame) = dst.take_last_data_frame() {
 tracing::trace!(
-" -> reclaimed; frame={:?}; sz={}",
-frame,
-frame.payload().inner.get_ref().remaining()
+?frame,
+sz = frame.payload().inner.get_ref().remaining(),
+"reclaimed"
 );

 let mut eos = false;
@@ -603,11 +606,12 @@ impl Prioritize {
 }

 pub fn clear_queue<B>(&mut self, buffer: &mut Buffer<Frame<B>>, stream: &mut store::Ptr) {
-tracing::trace!("clear_queue; stream={:?}", stream.id);
+let span = tracing::trace_span!("clear_queue", ?stream.id);
+let _e = span.enter();

 // TODO: make this more efficient?
 while let Some(frame) = stream.pending_send.pop_front(buffer) {
-tracing::trace!("dropping; frame={:?}", frame);
+tracing::trace!(?frame, "dropping");
 }

 stream.buffered_send_data = 0;
@@ -644,16 +648,14 @@ impl Prioritize {
 where
 B: Buf,
 {
-tracing::trace!("pop_frame");
+let span = tracing::trace_span!("pop_frame");
+let _e = span.enter();

 loop {
 match self.pending_send.pop(store) {
 Some(mut stream) => {
-tracing::trace!(
-"pop_frame; stream={:?}; stream.state={:?}",
-stream.id,
-stream.state
-);
+let span = tracing::trace_span!("popped", ?stream.id, ?stream.state);
+let _e = span.enter();

 // It's possible that this stream, besides having data to send,
 // is also queued to send a reset, and thus is already in the queue
@@ -662,11 +664,7 @@ impl Prioritize {
 // To be safe, we just always ask the stream.
 let is_pending_reset = stream.is_pending_reset_expiration();

-tracing::trace!(
-" --> stream={:?}; is_pending_reset={:?};",
-stream.id,
-is_pending_reset
-);
+tracing::trace!(is_pending_reset);

 let frame = match stream.pending_send.pop_front(buffer) {
 Some(Frame::Data(mut frame)) => {
@@ -676,24 +674,19 @@ impl Prioritize {
 let sz = frame.payload().remaining();

 tracing::trace!(
-" --> data frame; stream={:?}; sz={}; eos={:?}; window={}; \
-available={}; requested={}; buffered={};",
-frame.stream_id(),
 sz,
-frame.is_end_stream(),
-stream_capacity,
-stream.send_flow.available(),
-stream.requested_send_capacity,
-stream.buffered_send_data,
+eos = frame.is_end_stream(),
+window = %stream_capacity,
+available = %stream.send_flow.available(),
+requested = stream.requested_send_capacity,
+buffered = stream.buffered_send_data,
+"data frame"
 );

 // Zero length data frames always have capacity to
 // be sent.
 if sz > 0 && stream_capacity == 0 {
-tracing::trace!(
-" --> stream capacity is 0; requested={}",
-stream.requested_send_capacity
-);
+tracing::trace!("stream capacity is 0");

 // Ensure that the stream is waiting for
 // connection level capacity
@@ -721,34 +714,38 @@ impl Prioritize {
 // capacity at this point.
 debug_assert!(len <= self.flow.window_size());

-tracing::trace!(" --> sending data frame; len={}", len);
+tracing::trace!(len, "sending data frame");

 // Update the flow control
-tracing::trace!(" -- updating stream flow --");
-stream.send_flow.send_data(len);
+tracing::trace_span!("updating stream flow").in_scope(|| {
+stream.send_flow.send_data(len);

-// Decrement the stream's buffered data counter
-debug_assert!(stream.buffered_send_data >= len);
-stream.buffered_send_data -= len;
-stream.requested_send_capacity -= len;
+// Decrement the stream's buffered data counter
+debug_assert!(stream.buffered_send_data >= len);
+stream.buffered_send_data -= len;
+stream.requested_send_capacity -= len;

-// Assign the capacity back to the connection that
-// was just consumed from the stream in the previous
-// line.
-self.flow.assign_capacity(len);
+// Assign the capacity back to the connection that
+// was just consumed from the stream in the previous
+// line.
+self.flow.assign_capacity(len);
+});

-tracing::trace!(" -- updating connection flow --");
-self.flow.send_data(len);
+let (eos, len) = tracing::trace_span!("updating connection flow")
+.in_scope(|| {
+self.flow.send_data(len);

-// Wrap the frame's data payload to ensure that the
-// correct amount of data gets written.
+// Wrap the frame's data payload to ensure that the
+// correct amount of data gets written.

-let eos = frame.is_end_stream();
-let len = len as usize;
+let eos = frame.is_end_stream();
+let len = len as usize;

-if frame.payload().remaining() > len {
-frame.set_end_stream(false);
-}
+if frame.payload().remaining() > len {
+frame.set_end_stream(false);
+}
+(eos, len)
+});

 Frame::Data(frame.map(|buf| Prioritized {
 inner: buf.take(len),