diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
index a525483f..964bcbfa 100644
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -137,7 +137,7 @@ impl GaiResolver {
             drop(pool)
         });
 
-        tx.spawn(on_shutdown);
+        tx.spawn(on_shutdown).expect("can spawn on self");
 
         GaiResolver {
             tx,
@@ -155,7 +155,7 @@ impl Resolve for GaiResolver {
         self.tx.spawn(GaiBlocking {
             host: name.host,
             tx: Some(tx),
-        });
+        }).expect("spawn GaiBlocking");
         GaiFuture {
             rx,
             _threadpool_keep_alive: self._threadpool_keep_alive.clone(),
@@ -225,9 +225,7 @@ impl Future for GaiBlocking {
             return Poll::Ready(());
         }
 
-        debug!("resolving host={:?}", self.host);
-        let res = (&*self.host, 0).to_socket_addrs()
-            .map(|i| IpAddrs { iter: i });
+        let res = self.block();
 
         let tx = self.tx.take().expect("polled after complete");
         let _ = tx.send(res);
diff --git a/src/common/drain.rs b/src/common/drain.rs
index 561b733a..31ea9267 100644
--- a/src/common/drain.rs
+++ b/src/common/drain.rs
@@ -18,7 +18,7 @@ pub fn channel() -> (Signal, Watch) {
     (
         Signal {
             drained_rx,
-            tx,
+            _tx: tx,
         },
         Watch {
             drained_tx,
@@ -29,7 +29,7 @@ pub fn channel() -> (Signal, Watch) {
 
 pub struct Signal {
     drained_rx: mpsc::Receiver,
-    tx: watch::Sender,
+    _tx: watch::Sender,
 }
 
 pub struct Draining {
diff --git a/src/lib.rs b/src/lib.rs
index 0a6ac08b..c9fd1bcd 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -2,9 +2,8 @@
 #![deny(missing_docs)]
 #![deny(missing_debug_implementations)]
 #![deny(rust_2018_idioms)]
-// XXX NOOOOOOOO
+// TODO: re-enable denial when all lib tests are re-enabled.
 //#![cfg_attr(test, deny(warnings))]
-#![allow(warnings)]
 #![cfg_attr(all(test, feature = "nightly"), feature(test))]
 
 //! # hyper
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
index bd8e048d..28c5a5d3 100644
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -100,23 +100,6 @@ where
         T::update_date();
 
         ready!(self.poll_loop(cx))?;
-        loop {
-            self.poll_read(cx)?;
-            self.poll_write(cx)?;
-            self.poll_flush(cx)?;
-
-            // This could happen if reading paused before blocking on IO,
-            // such as getting to the end of a framed message, but then
-            // writing/flushing set the state back to Init. In that case,
-            // if the read buffer still had bytes, we'd want to try poll_read
-            // again, or else we wouldn't ever be woken up again.
-            //
-            // Using this instead of task::current() and notify() inside
-            // the Conn is noticeably faster in pipelined benchmarks.
-            if !self.conn.wants_read_again() {
-                break;
-            }
-        }
 
         if self.is_done() {
             if let Some(pending) = self.conn.pending_upgrade() {
@@ -139,9 +122,9 @@ where
         // 16 was chosen arbitrarily, as that is number of pipelined requests
         // benchmarks often use. Perhaps it should be a config option instead.
         for _ in 0..16 {
-            self.poll_read(cx)?;
-            self.poll_write(cx)?;
-            self.poll_flush(cx)?;
+            let _ = self.poll_read(cx)?;
+            let _ = self.poll_write(cx)?;
+            let _ = self.poll_flush(cx)?;
 
             // This could happen if reading paused before blocking on IO,
             // such as getting to the end of a framed message, but then