Compare commits

...

31 Commits

Author SHA1 Message Date
Sean McArthur
509672aada feat(client): introduce version-specific client modules (#2906)
This creates `client::conn::http1` and `client::conn::http2` modules,
both with specific `SendRequest`, `Connection`, and `Builder` types.
2022-07-20 14:12:29 -07:00
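For orientation, here is a minimal sketch of how the per-version client API described in this commit might be used. The handshake path, body type, and IO bounds are assumptions drawn from the commit message, not a verbatim copy of a released API:

```rust
use hyper::client::conn::http1;
use hyper::{Body, Request};
use tokio::net::TcpStream;

async fn fetch_root() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // The conn-level API works on an already-established transport.
    let stream = TcpStream::connect("example.com:80").await?;

    // Version-specific handshake: yields a request handle plus a Connection
    // future that drives the HTTP/1 state machine over the stream.
    let (mut send_request, connection) = http1::Builder::new().handshake(stream).await?;

    // The Connection must be polled for any progress to happen.
    tokio::spawn(async move {
        if let Err(err) = connection.await {
            eprintln!("connection error: {}", err);
        }
    });

    let req = Request::builder()
        .uri("/")
        .header("host", "example.com")
        .body(Body::empty())?;

    let res = send_request.send_request(req).await?;
    println!("status: {}", res.status());
    Ok(())
}
```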
deantvv
09e35668e5 feat(ffi): add http1_allow_multiline_headers (#2918) 2022-07-19 12:18:09 -07:00
Sean McArthur
3660443108 test(client,server): add back tests around streaming bodies (#2905) 2022-06-29 06:39:21 -07:00
Oddbjørn Grødem
ce72f73464 feat(lib): remove stream cargo feature (#2896)
Closes #2855
2022-06-23 15:12:24 -07:00
Sean McArthur
a563404033 chore(lib): bump MSRV to 1.56 (#2902) 2022-06-23 14:52:36 -07:00
silence-coding
5fa113ebff fix(http1): fix http1_header_read_timeout to use same future (#2891)
Co-authored-by: silence <xxx@email.com>
2022-06-13 13:44:28 -07:00
Oddbjørn Grødem
e9cab49e6e feat(server): remove AddrStream struct (#2869)
Remove the AddrStream type; it provides no benefit over tokio::net::TcpStream.

Closes #2850
2022-06-08 16:59:32 -07:00
Sean McArthur
2c7344a65b chore(lib): begin 1.0 development (#2882) 2022-06-08 16:28:18 -07:00
Adam C. Foltzer
b2052a433f feat(ext): support non-canonical HTTP/1 reason phrases (#2792)
Add a new extension type `hyper::ext::ReasonPhrase` gated by either the `ffi` or `http1` Cargo
features. When enabled, store any non-canonical reason phrases in this extension when parsing
responses, and write this reason phrase instead of the canonical reason phrase when emitting
responses.

Reason phrases are a disused corner of the spec that implementations ought to treat as opaque blobs
of bytes. Unfortunately, real-world traffic sometimes does depend on being able to inspect and
manipulate them.

Non-canonical reason phrases are checked for validity at runtime to prevent invalid and dangerous
characters from being emitted when writing responses. An `unsafe` escape hatch is present for hyper
itself to create reason phrases that have been parsed (and therefore implicitly validated) by
httparse.
2022-06-08 15:57:33 -07:00
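A small sketch of how a client might inspect the stored extension. Only the extension type itself is described by the commit; the `as_bytes` accessor and feature setup are assumptions:

```rust
use hyper::ext::ReasonPhrase;
use hyper::{Body, Response};

// Log whatever reason phrase hyper captured while parsing the response,
// falling back to the canonical phrase derived from the status code.
fn log_reason(res: &Response<Body>) {
    match res.extensions().get::<ReasonPhrase>() {
        // Reason phrases are opaque bytes; they are not guaranteed to be UTF-8.
        Some(reason) => println!("reason phrase: {:?}", reason.as_bytes()),
        None => println!(
            "canonical reason: {}",
            res.status().canonical_reason().unwrap_or("<unknown>")
        ),
    }
}
```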
evenbetter
f12d4d4aa8 refactor(lib): resolve unused import (#2889) 2022-06-07 08:10:29 -07:00
Kian-Meng Ang
4545c3ef19 docs(proto): fix typos (#2876) 2022-05-30 07:43:39 -07:00
Sean McArthur
f8e2a83194 v0.14.19 2022-05-27 12:05:38 -07:00
Ryan Russell
a929df843e docs(various): fix typos in VISION and ROADMAP (#2875)
Signed-off-by: r <ryanrussell@users.noreply.github.com>
2022-05-27 13:51:32 +00:00
Nylonicious
3a755a632d chore(lib): update tokio-util to 0.7 (#2762) 2022-05-26 16:26:31 -07:00
Sean McArthur
4678be9e81 docs(contrib): add guide for Triaging Issues 2022-05-25 14:13:09 -05:00
Sean McArthur
775fac114b docs(lib): propose 1.0 roadmap (#2806) 2022-05-20 10:08:55 -07:00
silence-coding
a32658c1ae feat(server): add Connection::http2_max_header_list_size option (#2828)
This allows setting the HTTP/2 `SETTINGS_MAX_HEADER_LIST_SIZE` setting, which advertises to the peer the maximum allowed header list size and is also enforced internally.

Closes #2826
2022-05-18 16:52:01 -07:00
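A hedged sketch of setting this option on a per-connection server builder; the builder location is inferred from the commit title and may sit on a different type in a given release:

```rust
use hyper::server::conn::Http;

// Advertise a 16 KiB cap on the total HTTP/2 header list size; per the
// commit message, hyper also enforces the limit internally.
fn http_with_header_cap() -> Http {
    let mut http = Http::new();
    http.http2_max_header_list_size(16 * 1024);
    http
}
```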
Jannes (思明)
67b73138f1 fix(server): don't add implicit content-length to HEAD responses (#2836)
HEAD responses should not have content-length implicitly set by hyper.

Co-authored-by: Jannes Timm <jannes@cloudflare.com>
2022-05-18 10:49:58 -07:00
Basti Ortiz
faf24c6ad8 refactor(http1): assorted code readability improvements in h1/conn.rs (#2817)
* refactor: use `matches` macro to flatten code

* refactor: prefer `matches` over explicit matching

* refactor: reuse `can_write_body` method

* refactor: use `matches` over `if`-`let`

* refactor: nested `if`-`else` as early return

* refactor: move inner `match` logic outside

* refactor: unneeded `return` in `match`

* refactor: remove unneeded reference matching

* refactor: use early returns for idle check

* refactor: use `matches` macro for Boolean `match`
2022-04-26 11:29:49 -07:00
Anthony Ramine
6a35c175f2 fix(http1): fix preserving header case without enabling ffi (#2820)
The previous commit broke this, but it wasn't released, so released versions will never notice the breakage.
2022-04-26 08:24:57 -07:00
cui fliter
89598dfcfe docs(lib): fix some typos (#2818)
Signed-off-by: cuishuang <imcusg@gmail.com>
2022-04-25 11:05:48 -07:00
Liam Warfield
78de8914ea feature(ffi): add connection option to preserve header order (#2798)
Libcurl expects that headers are iterated in the same order that they
are received. Previously this caused curl tests 580 and 581 to fail.
This necessitated exposing a way to preserve the original ordering of
HTTP headers.

SUMMARY OF CHANGES: Add a new data structure called OriginalHeaderOrder that
represents the order in which headers originally appear in an HTTP
message. This data structure is `Vec<(Headername, multimap-index)>`.
This vector is ordered by the order in which headers were received.
Add the following ffi functions:
- ffi::client::hyper_clientconn_options_set_preserve_header_order : An
     ffi interface to configure a connection to preserve header order.
- ffi::client::hyper_clientconn_options_set_preserve_header_case : An
     ffi interface to configure a connection to preserve header case.
- Add a new option to ParseContext, and Conn::State called `preserve_header_order`.
  This option, and all the code paths it creates are behind the `ffi`
  feature flag. This should not change performance of response parsing for
  non-ffi users.

Closes #2780

BREAKING CHANGE: hyper_clientconn_options_new no longer
  sets the http1_preserve_header_case connection option by default.
  Users should now call
  hyper_clientconn_options_set_preserve_header_case
  if they desire that functionality.
2022-04-23 09:05:37 -07:00
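To picture the data structure described above, here is a simplified, hypothetical stand-in (not the actual internal type), using the `http` crate's `HeaderName`:

```rust
use http::header::HeaderName;

/// Simplified stand-in for the internal `OriginalHeaderOrder` described above:
/// one entry per received header line, in arrival order. The index points into
/// the multimap of values stored for that name, so repeated headers can be
/// replayed in their original positions.
#[derive(Default)]
pub struct HeaderOrder {
    entries: Vec<(HeaderName, usize)>,
}

impl HeaderOrder {
    /// Record a header line as it is parsed.
    pub fn record(&mut self, name: HeaderName, multimap_index: usize) {
        self.entries.push((name, multimap_index));
    }

    /// Iterate headers in the exact order they appeared on the wire.
    pub fn iter(&self) -> impl Iterator<Item = (&HeaderName, usize)> + '_ {
        self.entries.iter().map(|(name, idx)| (name, *idx))
    }
}
```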
Ralf Jung
e1138d716d test(lib): add CI job to test with Miri 2022-04-21 12:55:52 -07:00
Ralf Jung
8834d5a2a7 test(lib): fix tests with more feature combinations 2022-04-21 12:55:52 -07:00
Ilya Trefilov
ffbf610b16 feat(server): add AddrStream::local_addr() (#2816)
Expose the local address of the TCP connection in AddrStream.

Closes #2773
2022-04-20 15:35:36 -07:00
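A brief sketch of reading both addresses when building per-connection services, following typical hyper 0.14 `make_service_fn` usage; the handler body is illustrative only:

```rust
use std::convert::Infallible;
use std::net::SocketAddr;

use hyper::server::conn::AddrStream;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Request, Response, Server};

async fn run(addr: SocketAddr) -> Result<(), hyper::Error> {
    let make_svc = make_service_fn(|conn: &AddrStream| {
        // Capture both ends of the accepted TCP connection.
        let remote = conn.remote_addr();
        let local = conn.local_addr(); // the accessor added by this change
        async move {
            Ok::<_, Infallible>(service_fn(move |_req: Request<Body>| async move {
                Ok::<_, Infallible>(Response::new(Body::from(format!(
                    "local {} <- remote {}",
                    local, remote
                ))))
            }))
        }
    });

    Server::bind(&addr).serve(make_svc).await
}
```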
Sean McArthur
d2c945e8ed docs(contrib): add contributing guide for submitting pull requests 2022-04-15 09:19:32 -07:00
Sean McArthur
311ba2b97e docs(lib): define the VISION and TENETS 2022-04-12 16:54:27 -07:00
Basti Ortiz
1d895b8dfc refactor(capi): make early returns consistent in C examples (#2812)
- Since the `if` condition already causes the loop to `break`,
  the `else` is not necessary. We wouldn't have reached the `else`
  block, anyway, if the prior `if` condition passed.
- We are more likely to poll a successful chunk than finish
  the request or throw an error. Thus, it is best if we go
  for the optimistic route and check for the successful
  case first.
2022-04-11 17:20:14 -07:00
SabrinaJewson
e3ee1de32d style(server): rustfmt 2022-04-08 17:02:02 -07:00
SabrinaJewson
dd08d9c3e5 refactor(server): simplify server cfgs
`server.rs` is currently littered with `cfg`s for `http1` or `http2`,
since the majority of server behaviour is only available with either one
of those feature flags active. This is hard to maintain and confusing to
read, so this commit extracts the two implementations into their own
files, since there is very little benefit in sharing code between the
two.
2022-04-08 17:02:02 -07:00
SabrinaJewson
0fec1c8737 refactor(server): move non-conn code out of conn.rs
The actual code for `Server` was previously organized very confusingly:
it was thrice layered with `SpawnAll` and `Serve` which both appeared in
conn.rs despite not having anything to do with the lower-level conn API.
This commit changes that, removing all layering and having the code for
the higher-level `Server` appear inside `server.rs` only.
2022-04-08 17:02:02 -07:00
52 changed files with 3318 additions and 1039 deletions

View File

@@ -16,6 +16,7 @@ jobs:
- style
- test
- msrv
- miri
- features
- ffi
- ffi-header
@@ -100,7 +101,7 @@ jobs:
strategy:
matrix:
rust:
- 1.49 # keep in sync with MSRV.md dev doc
- 1.56 # keep in sync with MSRV.md dev doc
os:
- ubuntu-latest
@@ -124,6 +125,27 @@ jobs:
command: check
args: --features full
miri:
name: Test with Miri
needs: [style]
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v1
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly
components: miri
override: true
- name: Test
# Can't enable tcp feature since Miri does not support the tokio runtime
run: MIRIFLAGS="-Zmiri-disable-isolation" cargo miri test --features http1,http2,client,server,nightly
features:
name: features
needs: [style]

View File

@@ -1,3 +1,26 @@
### v0.14.19 (2022-05-27)
#### Bug Fixes
* **http1:** fix preserving header case without enabling ffi (#2820) ([6a35c175](https://github.com/hyperium/hyper/commit/6a35c175f2b416851518b5831c2c7827d6dbd822))
* **server:** don't add implicit content-length to HEAD responses (#2836) ([67b73138](https://github.com/hyperium/hyper/commit/67b73138f110979f3c77ef7b56588f018837e592))
#### Features
* **server:**
* add `Connection::http2_max_header_list_size` option (#2828) ([a32658c1](https://github.com/hyperium/hyper/commit/a32658c1ae7f1261fa234a767df963be4fc63521), closes [#2826](https://github.com/hyperium/hyper/issues/2826))
* add `AddrStream::local_addr()` (#2816) ([ffbf610b](https://github.com/hyperium/hyper/commit/ffbf610b1631cabfacb20886270e3c137fa93800), closes [#2773](https://github.com/hyperium/hyper/issues/2773))
#### Breaking Changes
* **ffi (unstable):**
* `hyper_clientconn_options_new` no longer sets the `http1_preserve_header_case` connection option by default.
Users should now call `hyper_clientconn_options_set_preserve_header_case` if they desire that functionality. ([78de8914](https://github.com/hyperium/hyper/commit/78de8914eadeab4b9a2c71a82c77b2ce33fe6c74))
### v0.14.18 (2022-03-22)

View File

@@ -5,6 +5,7 @@ You want to contribute? You're awesome! Don't know where to start? Check the [li
[easy tag]: https://github.com/hyperium/hyper/issues?q=label%3AE-easy+is%3Aopen
## Pull Requests
## [Pull Requests](./docs/PULL_REQUESTS.md)
- [Submitting a Pull Request](./docs/PULL_REQUESTS.md#submitting-a-pull-request)
- [Commit Guidelines](./docs/COMMITS.md)

View File

@@ -1,6 +1,6 @@
[package]
name = "hyper"
version = "0.14.18"
version = "1.0.0-dev.0"
description = "A fast and correct HTTP library."
readme = "README.md"
homepage = "https://hyper.rs"
@@ -12,6 +12,8 @@ keywords = ["http", "hyper", "hyperium"]
categories = ["network-programming", "web-programming::http-client", "web-programming::http-server"]
edition = "2018"
publish = false # no accidents while in dev
include = [
"Cargo.toml",
"LICENSE",
@@ -25,7 +27,8 @@ futures-core = { version = "0.3", default-features = false }
futures-channel = "0.3"
futures-util = { version = "0.3", default-features = false }
http = "0.2"
http-body = "0.4"
http-body = { git = "https://github.com/hyperium/http-body", branch = "master" }
http-body-util = { git = "https://github.com/hyperium/http-body", branch = "master" }
httpdate = "1.0"
httparse = "1.6"
h2 = { version = "0.3.9", optional = true }
@@ -61,7 +64,7 @@ tokio = { version = "1", features = [
"test-util",
] }
tokio-test = "0.4"
tokio-util = { version = "0.6", features = ["codec"] }
tokio-util = { version = "0.7", features = ["codec"] }
tower = { version = "0.4", features = ["make", "util"] }
url = "2.2"
@@ -78,7 +81,6 @@ full = [
"http1",
"http2",
"server",
"stream",
"runtime",
]
@@ -90,9 +92,6 @@ http2 = ["h2"]
client = []
server = []
# `impl Stream` for things
stream = []
# Tokio support
runtime = [
"tcp",

View File

@@ -8,6 +8,10 @@
A **fast** and **correct** HTTP implementation for Rust.
> **Note**: hyper's [master](https://github.com/hyperium/hyper) branch is
> currently preparing breaking changes. For the most recently *released* code,
> look to the [0.14.x branch](https://github.com/hyperium/hyper/tree/0.14.x).
- HTTP/1 and HTTP/2
- Asynchronous design
- Leading in performance

View File

@@ -6,7 +6,7 @@ extern crate test;
use bytes::Buf;
use futures_util::stream;
use futures_util::StreamExt;
use hyper::body::Body;
use http_body_util::StreamBody;
macro_rules! bench_stream {
($bencher:ident, bytes: $bytes:expr, count: $count:expr, $total_ident:ident, $body_pat:pat, $block:expr) => {{
@@ -20,9 +20,10 @@ macro_rules! bench_stream {
$bencher.iter(|| {
rt.block_on(async {
let $body_pat = Body::wrap_stream(
let $body_pat = StreamBody::new(
stream::iter(__s.iter()).map(|&s| Ok::<_, std::convert::Infallible>(s)),
);
$block;
});
});

View File

@@ -9,10 +9,11 @@ use std::sync::mpsc;
use std::time::Duration;
use futures_util::{stream, StreamExt};
use http_body_util::StreamBody;
use tokio::sync::oneshot;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Response, Server};
use hyper::{Response, Server};
macro_rules! bench_server {
($b:ident, $header:expr, $body:expr) => {{
@@ -101,7 +102,7 @@ fn throughput_fixedsize_large_payload(b: &mut test::Bencher) {
fn throughput_fixedsize_many_chunks(b: &mut test::Bencher) {
bench_server!(b, ("content-length", "1000000"), || {
static S: &[&[u8]] = &[&[b'x'; 1_000] as &[u8]; 1_000] as _;
Body::wrap_stream(stream::iter(S.iter()).map(|&s| Ok::<_, String>(s)))
StreamBody::new(stream::iter(S.iter()).map(|&s| Ok::<_, String>(s)))
})
}
@@ -123,7 +124,7 @@ fn throughput_chunked_large_payload(b: &mut test::Bencher) {
fn throughput_chunked_many_chunks(b: &mut test::Bencher) {
bench_server!(b, ("transfer-encoding", "chunked"), || {
static S: &[&[u8]] = &[&[b'x'; 1_000] as &[u8]; 1_000] as _;
Body::wrap_stream(stream::iter(S.iter()).map(|&s| Ok::<_, String>(s)))
StreamBody::new(stream::iter(S.iter()).map(|&s| Ok::<_, String>(s)))
})
}

View File

@@ -24,44 +24,42 @@ static size_t read_cb(void *userdata, hyper_context *ctx, uint8_t *buf, size_t b
struct conn_data *conn = (struct conn_data *)userdata;
ssize_t ret = read(conn->fd, buf, buf_len);
if (ret < 0) {
int err = errno;
if (err == EAGAIN) {
if (ret >= 0) {
return ret;
}
if (errno != EAGAIN) {
// kaboom
return HYPER_IO_ERROR;
}
// would block, register interest
if (conn->read_waker != NULL) {
hyper_waker_free(conn->read_waker);
}
conn->read_waker = hyper_context_waker(ctx);
return HYPER_IO_PENDING;
} else {
// kaboom
return HYPER_IO_ERROR;
}
} else {
return ret;
}
}
static size_t write_cb(void *userdata, hyper_context *ctx, const uint8_t *buf, size_t buf_len) {
struct conn_data *conn = (struct conn_data *)userdata;
ssize_t ret = write(conn->fd, buf, buf_len);
if (ret < 0) {
int err = errno;
if (err == EAGAIN) {
if (ret >= 0) {
return ret;
}
if (errno != EAGAIN) {
// kaboom
return HYPER_IO_ERROR;
}
// would block, register interest
if (conn->write_waker != NULL) {
hyper_waker_free(conn->write_waker);
}
conn->write_waker = hyper_context_waker(ctx);
return HYPER_IO_PENDING;
} else {
// kaboom
return HYPER_IO_ERROR;
}
} else {
return ret;
}
}
static void free_conn_data(struct conn_data *conn) {
@@ -98,9 +96,9 @@ static int connect_to(const char *host, const char *port) {
if (connect(sfd, rp->ai_addr, rp->ai_addrlen) != -1) {
break;
} else {
close(sfd);
}
close(sfd);
}
freeaddrinfo(result);
@@ -151,8 +149,8 @@ int main(int argc, char *argv[]) {
if (fd < 0) {
return 1;
}
printf("connected to %s, now get %s\n", host, path);
printf("connected to %s, now get %s\n", host, path);
if (fcntl(fd, F_SETFL, O_NONBLOCK) != 0) {
printf("failed to set socket to non-blocking\n");
return 1;
@@ -168,7 +166,6 @@ int main(int argc, char *argv[]) {
conn->read_waker = NULL;
conn->write_waker = NULL;
// Hookup the IO
hyper_io *io = hyper_io_new();
hyper_io_set_userdata(io, (void *)conn);
@@ -315,16 +312,17 @@ int main(int argc, char *argv[]) {
if (sel_ret < 0) {
printf("select() error\n");
return 1;
} else {
}
if (FD_ISSET(conn->fd, &fds_read)) {
hyper_waker_wake(conn->read_waker);
conn->read_waker = NULL;
}
if (FD_ISSET(conn->fd, &fds_write)) {
hyper_waker_wake(conn->write_waker);
conn->write_waker = NULL;
}
}
}

View File

@@ -24,44 +24,42 @@ static size_t read_cb(void *userdata, hyper_context *ctx, uint8_t *buf, size_t b
struct conn_data *conn = (struct conn_data *)userdata;
ssize_t ret = read(conn->fd, buf, buf_len);
if (ret < 0) {
int err = errno;
if (err == EAGAIN) {
if (ret >= 0) {
return ret;
}
if (errno != EAGAIN) {
// kaboom
return HYPER_IO_ERROR;
}
// would block, register interest
if (conn->read_waker != NULL) {
hyper_waker_free(conn->read_waker);
}
conn->read_waker = hyper_context_waker(ctx);
return HYPER_IO_PENDING;
} else {
// kaboom
return HYPER_IO_ERROR;
}
} else {
return ret;
}
}
static size_t write_cb(void *userdata, hyper_context *ctx, const uint8_t *buf, size_t buf_len) {
struct conn_data *conn = (struct conn_data *)userdata;
ssize_t ret = write(conn->fd, buf, buf_len);
if (ret < 0) {
int err = errno;
if (err == EAGAIN) {
if (ret >= 0) {
return ret;
}
if (errno != EAGAIN) {
// kaboom
return HYPER_IO_ERROR;
}
// would block, register interest
if (conn->write_waker != NULL) {
hyper_waker_free(conn->write_waker);
}
conn->write_waker = hyper_context_waker(ctx);
return HYPER_IO_PENDING;
} else {
// kaboom
return HYPER_IO_ERROR;
}
} else {
return ret;
}
}
static void free_conn_data(struct conn_data *conn) {
@@ -98,9 +96,9 @@ static int connect_to(const char *host, const char *port) {
if (connect(sfd, rp->ai_addr, rp->ai_addrlen) != -1) {
break;
} else {
close(sfd);
}
close(sfd);
}
freeaddrinfo(result);
@@ -126,17 +124,20 @@ static int poll_req_upload(void *userdata,
struct upload_body* upload = userdata;
ssize_t res = read(upload->fd, upload->buf, upload->len);
if (res < 0) {
printf("error reading upload file: %d", errno);
return HYPER_POLL_ERROR;
} else if (res == 0) {
// All done!
*chunk = NULL;
return HYPER_POLL_READY;
} else {
if (res > 0) {
*chunk = hyper_buf_copy(upload->buf, res);
return HYPER_POLL_READY;
}
if (res == 0) {
// All done!
*chunk = NULL;
return HYPER_POLL_READY;
}
// Oh no!
printf("error reading upload file: %d", errno);
return HYPER_POLL_ERROR;
}
static int print_each_header(void *userdata,
@@ -348,7 +349,8 @@ int main(int argc, char *argv[]) {
hyper_executor_push(exec, body_data);
break;
} else {
}
assert(task_type == HYPER_TASK_EMPTY);
hyper_task_free(task);
hyper_body_free(resp_body);
@@ -361,7 +363,6 @@ int main(int argc, char *argv[]) {
free(upload.buf);
return 0;
}
case EXAMPLE_NOT_SET:
// A background task for hyper completed...
hyper_task_free(task);
@@ -387,19 +388,19 @@ int main(int argc, char *argv[]) {
if (sel_ret < 0) {
printf("select() error\n");
return 1;
} else {
}
if (FD_ISSET(conn->fd, &fds_read)) {
hyper_waker_wake(conn->read_waker);
conn->read_waker = NULL;
}
if (FD_ISSET(conn->fd, &fds_write)) {
hyper_waker_wake(conn->write_waker);
conn->write_waker = NULL;
}
}
}
return 0;
}

View File

@@ -355,6 +355,22 @@ void hyper_clientconn_free(struct hyper_clientconn *conn);
*/
struct hyper_clientconn_options *hyper_clientconn_options_new(void);
/*
Set whether or not header case is preserved.
Pass `0` to allow lowercase normalization (default), `1` to retain original case.
*/
void hyper_clientconn_options_set_preserve_header_case(struct hyper_clientconn_options *opts,
int enabled);
/*
Set whether or not header order is preserved.
Pass `0` to allow reordering (default), `1` to retain original ordering.
*/
void hyper_clientconn_options_set_preserve_header_order(struct hyper_clientconn_options *opts,
int enabled);
/*
Free a `hyper_clientconn_options *`.
*/
@@ -386,6 +402,16 @@ enum hyper_code hyper_clientconn_options_http2(struct hyper_clientconn_options *
enum hyper_code hyper_clientconn_options_headers_raw(struct hyper_clientconn_options *opts,
int enabled);
/*
Set whether HTTP/1 connections will accept obsolete line folding for header values.
Newline codepoints (\r and \n) will be transformed to spaces when parsing.
Pass `0` to disable, `1` to enable.
*/
enum hyper_code hyper_clientconn_options_http1_allow_multiline_headers(struct hyper_clientconn_options *opts,
int enabled);
/*
Frees a `hyper_error`.
*/

View File

@@ -4,7 +4,7 @@
- Don't be mean.
- Insulting anyone is prohibited.
- Harrassment of any kind is prohibited.
- Harassment of any kind is prohibited.
- If another person feels uncomfortable with your remarks, stop it.
- If a moderator deems your comment or conduct as inappropriate, stop it.
- Disagreeing is fine, but keep it to technical arguments. Never attack the person.

View File

@@ -2,6 +2,68 @@
The [issue tracker][issues] for hyper is where we track all features, bugs, and discuss proposals.
## Triaging
Once an issue has been opened, it is normal for there to be discussion
around it. Some contributors may have differing opinions about the issue,
including whether the behavior being seen is a bug or a feature. This
discussion is part of the process and should be kept focused, helpful, and
professional.
The objective of helping with triaging issues is to help reduce the issue
backlog and keep the issue tracker healthy, while giving newcomers another
meaningful way to get engaged and contribute.
### Acknowledge
Acknowledge the human. This is meant actively, such as giving a welcome, or
thanks for a detailed report, or any other greeting that makes the person feel
that their contribution (issues are contributions!) is valued. It also is meant
to be internalized, and be sure to always [treat the person kindly][COC]
throughout the rest of the steps of triaging.
### Ask for more info
Frequently, we need more information than was originally provided to fully
evaluate an issue.
If it is a bug report, ask follow up questions that help us get a [minimum
reproducible example][MRE]. This may take several round-trip questions. Once
all the details are gathered, it may be helpful to edit the original issue text
to include them all.
### Categorize
Once enough information has been gathered, the issue should be categorized
with [labels](#labels). Ideally, most issues should be labelled with an area,
effort, and severity. An issue _can_ have multiple areas, pick what fits. There
should be only one severity, and the descriptions of each should help to pick
the right one. The hardest label to select is "effort". If after reading the
descriptions of each effort level, you're still unsure, you can ping a
maintainer to pick one.
### Adjust the title
An optional step when triaging is to adjust the title once more information is
known. Sometimes an issue starts as a question, and through discussion, it
turns out to be a feature request, or a bug report. In those cases, the title
should be changed from a question into a succinct action to be taken. For
example, a question about a non-existent configuration option
may be reworded to "Add option to Client to do Zed".
### Mentoring
The last part of triaging is to try to make the issue a learning experience.
After a discussion with the reporter, it would be good to ask if they are now
interested in submitting the change described in the issue.
Otherwise, it would be best to leave the issue with a series of steps for
anyone else to try to write the change. That could be pointing out that a
design proposal is needed, addressing certain points. Or, if the required
changes are mostly known, a list of links to modules and functions where code
needs to be changed, and to what. That way we mentor newcomers to become
successful contributors of new [pull requests][PRs].
## Labels
Issues are organized with a set of labels. Most labels follow a system of being prefixed by a "type".
@@ -47,3 +109,5 @@ The severity marks how _severe_ the issue is. Note this isn't "importance" or "p
- **S-refactor**: improve internal code to help readability and maintenance.
[issues]: https://github.com/hyperium/hyper/issues
[COC]: ./CODE_OF_CONDUCT.md
[PRs]: ./PULL_REQUESTS.md

View File

@@ -6,4 +6,4 @@ hyper. It is possible that an older compiler can work, but that is not
guaranteed. We try to increase the MSRV responsibly, only when a significant
new feature is needed.
The current MSRV is: **1.49**.
The current MSRV is: **1.56**.

docs/PULL_REQUESTS.md (new file, 49 lines)
View File

@@ -0,0 +1,49 @@
# Pull Requests
Pull requests are the way to submit changes to the hyper repository.
## Submitting a Pull Request
In most cases, it is a good idea to discuss a potential change in an
[issue](ISSUES.md). This will allow other contributors to provide guidance and
feedback _before_ significant code work is done, and can increase the
likelihood of getting the pull request merged.
### Tests
If the change being proposed alters code (as opposed to only documentation for
example), it is either adding new functionality to hyper or it is fixing
existing, broken functionality. In both of these cases, the pull request should
include one or more tests to ensure that hyper does not regress in the future.
### Commits
Once code, tests, and documentation have been written, a commit needs to be
made. Following the [commit guidelines](COMMITS.md) will help with the review
process by making your change easier to understand, and makes it easier for
hyper to produce a valuable changelog with each release.
However, if your message doesn't perfectly match the guidelines, **do not
worry!** The person that eventually merges can easily fixup the message at that
time.
### Opening the Pull Request
From within GitHub, open a new pull request from your personal branch.
Once opened, pull requests are usually reviewed within a few days.
### Discuss and Update
You will probably get feedback or requests for changes to your Pull Request.
This is a big part of the submission process so don't be discouraged! Some
contributors may sign off on the Pull Request right away, others may have more
detailed comments or feedback. This is a necessary part of the process in order
to evaluate whether the changes are correct and necessary.
Any community member can review a PR and you might get conflicting feedback.
Keep an eye out for comments from code owners to provide guidance on
conflicting feedback.
You don't need to close the PR and create a new one to address feedback. You
may simply push new commits to the existing branch.

docs/ROADMAP.md (new file, 404 lines)
View File

@@ -0,0 +1,404 @@
# hyper 1.0 Roadmap
## Goal
Align current hyper to the [hyper VISION][VISION].
The VISION outlines a decision-making framework, use-cases, and general shape
of hyper. This roadmap describes the currently known problems with hyper, and
then shows what changes are needed to make hyper 1.0 look more like what is in
the VISION.
## Known Issues
> **Note**: These known issues are as of hyper v0.14.x. After v1.0 is released,
ideally these issues will have been solved. Keeping this history may be helpful
to Future Us, though.
### Higher-level Client and Server problems
Both the higher-level `Client` and `Server` types have stability concerns.
For the `hyper::Server`:
- The `Accept` trait is complex, and too easy to get wrong. If used with TLS, a slow TLS handshake
can affect all other new connections waiting for it to finish.
- The `MakeService<&IO>` is confusing. The bounds are an assault on the eyes.
- The `MakeService` API doesn't make it easy to annotate the HTTP connection with `tracing`.
- Graceful shutdown doesn't give enough control.
It's more common for people to simply use `hyper::server::conn` at this point,
than to bother with the `hyper::Server`.
While the `hyper::Client` is much easier to use, problems still exist:
- The whole `Connect` design isn't stable.
- ALPN and proxies can provide surprising extra configuration of connections.
- Some `Connect` implementations may wish to view the path, in addition to the scheme, host, and port.
- Wants `runtime` feature
- The Pool could be made more general or composable. At the same time, more customization is
desired, and it's not clear
how to expose it yet.
### Runtime woes
hyper has been able to support different runtimes, but its default support
for Tokio has sometimes been awkward.
- The `runtime` cargo-feature isn't additive
- Built-in Tokio support can be confusing
- Executors and Timers
- The `runtime` feature currently enables a few options that require a timer, such as timeouts and
keepalive intervals. It implicitly relies on Tokio's timer context. This can be quite confusing.
- IO traits
- Should we publicly depend on Tokio's traits?
- `futures-io`?
- Definitely nope.
- Not stable. (0.3?)
- No uninitialized memory.
- Eventual `std` traits?
- They've been in design for years.
- We cannot base our schedule on them.
- When they are stable, we can:
- Provide a bridge in `hyper-util`.
- Consider a 2.0 of hyper.
- Define our own traits, provide util wrappers?
### Forwards-compatibility
There's a concern about forwards-compatibility. We want to be able to add
support for new HTTP features without needing a new major version. While most
of `http` and `hyper` are prepared for that, there are two potential problems.
- New frames on an HTTP stream (body)
- Receiving a new frame type would require a new trait method
- There's no way to implement a "receive unknown frame" that hyper doesn't know about.
- Sending an unknown frame type would be even harder.
- Besides being able to pass an "unknown" type through the trait, the user would need to be
able to describe how that frame is encoded in HTTP/2/3.
- New HTTP versions
- HTTP/3 will require a new transport abstraction. It's not as simple as just using some
`impl AsyncRead + AsyncWrite`. While HTTP/2 bundled the concept of stream creation internally,
and thus could be managed wholly on top of a read-write transport, HTTP/3 is different. Stream
creation is shifted to the QUIC protocol, and HTTP/3 needs to be able to use that directly.
- This means the existing `Connection` types for both client and server will not be able to
accept a QUIC transport so we can add HTTP/3 support.
### Errors
It's not easy to match for specific errors.
The `Error::source()` can leak an internal dependency. For example, a
`hyper::Error` may wrap an `h2::Error`. Users can downcast the source at
runtime, and hyper internally changing the version of its `h2` dependency can
cause runtime breakage for users.
Formatting errors is in conflict with the current expected norm. The
`fmt::Display` implementation for `hyper::Error` currently prints its own
message, and then prints the message of any wrapped source error. The Errors
Working Group currently recommends that errors only print their own message
(link?). This conflict means that error "reporters", which crawl a source chain
and print each error, have a lot of duplicated information.
```
error fetching website: error trying to connect: tcp connect error: Connection refused (os error 61)
tcp connect error: Connection refused (os error 61)
Connection refused (os error 61)
```
While there is a good reason for why hyper's `Error` types do this, at the very
least, it _is_ unfortunate.
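The duplication arises because a reporter walks `Error::source()` while each error's `Display` output already embeds its cause's message. A generic illustration of such a reporter (plain std error handling, not hyper-specific code):

```rust
use std::error::Error;

// Walk the source chain the way a typical error "reporter" does.
fn report(err: &(dyn Error + 'static)) {
    eprintln!("error: {}", err);
    let mut source = err.source();
    while let Some(cause) = source {
        // If each layer's Display already includes its cause, every line
        // printed here repeats information from the lines above it.
        eprintln!("  caused by: {}", cause);
        source = cause.source();
    }
}
```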
### You call hyper, or hyper calls you?
> Note: this problem space, of who calls whom, will be explored more deeply in
> a future article.
At times, it's been wondered whether hyper should call user code, or if user
code should call hyper. For instance, should a `Service` be called with a
request when the connection receives one, or should the user always poll for
the next request?
There's a similar question around sending a message body. Should hyper ask the
body for more data to write, or should the user call a `write` method directly?
These both get at a root topic about [write
observability](https://github.com/hyperium/hyper/issues/2181). How do you know
when a response, or when body data, has been written successfully? This is
desirable for metrics, or for triggering other side-effects.
The `Service` trait also has some other frequently mentioned issues. Does
`poll_ready` pull its complexity weight for servers? What about returning
errors, what does that mean? Ideally users would turn all errors into
appropriate `http::Response`s. But in HTTP/2 and beyond, stream errors are
different from HTTP Server Error responses. Could the `Service::Error` type do
more to encourage best practices?
## Design
The goal is to get hyper closer to the [VISION][], using that to determine the
best way to solve the known issues above. The main thrust of the proposed
changes are to make hyper more **Flexible** and stable.
In order to keep hyper **Understandable**, however, the proposed changes *must*
be accompanied by providing utilities that solve the common usage patterns,
documentation explaining how to use the more flexible pieces, and guides on how
to reach for the `hyper-util`ity belt.
The majority of the changes are smaller and can be contained to the *Public
API* section, since they usually only apply to a single module or type. But the
biggest changes are explained in detail here.
### Split per HTTP version
The existing `Connection` types, both for the client and server, abstract over
HTTP version by requiring a generic `AsyncRead + AsyncWrite` transport type.
But as we figure out HTTP/3, that needs to change. So to prepare now, the
`Connection` types will be split up.
For example, there will now be `hyper::server::conn::http1::Connection` and
`hyper::server::conn::http2::Connection` types.
These specific types will still have a very similar looking API that, as the
VISION describes, provides **Correct** connection management as it pertains to
HTTP.
There will still be a type to wrap the different versions. It will no longer
be generic over the transport type, to prepare for being able to wrap HTTP/3
connections. Exactly how it will wrap, either by using internal trait objects,
or an `enum Either` style, or using a `trait Connection` that each type
implements, is something to be determined. It's likely that this "auto" type
will start in `hyper-util`.
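To make the split concrete, a rough sketch of what a per-version server connection could look like. The type paths follow the roadmap text; the builder methods, body type, and IO bounds are assumptions, not a finished API:

```rust
use std::convert::Infallible;

use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::{Body, Request, Response};
use tokio::net::TcpListener;

async fn serve(listener: TcpListener) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    loop {
        let (stream, _peer) = listener.accept().await?;
        tokio::spawn(async move {
            // One version-specific Connection per accepted stream; HTTP/2
            // would use hyper::server::conn::http2 with the same shape.
            let conn = http1::Builder::new().serve_connection(
                stream,
                service_fn(|_req: Request<Body>| async {
                    Ok::<_, Infallible>(Response::new(Body::from("hello")))
                }),
            );
            if let Err(err) = conn.await {
                eprintln!("connection error: {}", err);
            }
        });
    }
}
```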
### Focus on the `Connection` level
As mentioned in the *Known Issues*, the higher-level `Client` and `Server` have
stability and complexity problems. Therefore, for hyper 1.0, the main API will
focus on the "lower-level" connection types. The `Client` and `Server` helpers
will be moved to `hyper-util`.
## Public API
### body
The `Body` struct is removed. Its internal "variants" are [separated into
distinct types](https://github.com/hyperium/hyper/issues/2345), and can start
in either `hyper-util` or `http-body-util`.
The exported trait `HttpBody` is renamed to `Body`.
A single `Body` implementation in `hyper` is the one provided by receiving
client responses and server requests. It has the name `Streaming`.
> **Unresolved**: Other names can be considered during implementation. Another
> option is to not publicly name the implementation, but return `Response<impl
Body>`s.
The `Body` trait will be experimented on to see about making it possible to
return more frame types beyond just data and trailers.
> **Unresolved**: What exactly this looks like will only be known after
> experimentation.
### client
The high-level `hyper::Client` will be removed, along with the
`hyper::client::connect` module. They will be explored more in `hyper-util`.
As described in *Design*, the `client::conn` module will gain `http1` and
`http2` sub-modules, providing per-version `SendRequest`, `Connection`, and
`Builder` structs. An `auto` version can be explored in `hyper-util`.
### error
The `hyper::Error` struct remains in place.
All errors returned from `Error::source()` are made opaque. They are wrapped in an
internal `Opaque` newtype that still allows printing, but prevents downcasting
to the internal dependency.
A new `hyper::error::Code` struct is defined. It is an opaque struct, with
associated constants defining various code variants.
> Alternative: define a non-exhaustive enum. It's not clear that this is
> definitely better, though. Keeping it an opaque struct means we can add
> secondary parts to the code in the future, or add bit flags, or similar
> extensions.
The purpose of `Code` is to provide an abstraction over the kind of error that
is encountered. The `Code` could be some behavior noticed inside hyper, such as
an incomplete HTTP message. Or it can be "translated" from the underlying
protocol, if it defines protocol level errors. For example, an
`h2::Reason::CANCEL`.
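To make the "opaque struct with associated constants" alternative concrete, a small self-contained sketch; all names here are hypothetical, not existing hyper APIs:

```rust
/// Hypothetical opaque error code mirroring the proposal above. Keeping the
/// representation private means later versions can add secondary parts or
/// bit flags without a breaking change.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Code(u16);

impl Code {
    pub const INCOMPLETE_MESSAGE: Code = Code(1);
    pub const CANCELED: Code = Code(2);
    // New constants can be added freely, unlike variants on a plain enum.
}

fn describe(code: Code) -> &'static str {
    // Callers compare against the associated constants rather than matching
    // on enum variants.
    if code == Code::INCOMPLETE_MESSAGE {
        "the peer closed the connection mid-message"
    } else if code == Code::CANCELED {
        "the stream was canceled (e.g. translated from h2::Reason::CANCEL)"
    } else {
        "some other error condition"
    }
}
```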
### rt
The `Executor` trait stays in here.
Define a new trait `Timer`, which describes a way for users to provide a source
of sleeping/timeout futures. Similar to `Executor`, a new generic is added to
connection builders to provide a `Timer`.
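One plausible shape for such a `Timer` trait, with a Tokio-backed implementation of the kind that could live in `hyper-util`. This is illustrative only; the real trait is left to implementation work:

```rust
use std::future::Future;
use std::pin::Pin;
use std::time::{Duration, Instant};

/// Hypothetical timer abstraction: a source of sleep futures that connection
/// builders could accept alongside an Executor.
pub trait Timer {
    fn sleep(&self, duration: Duration) -> Pin<Box<dyn Future<Output = ()> + Send>>;
    fn sleep_until(&self, deadline: Instant) -> Pin<Box<dyn Future<Output = ()> + Send>>;
}

/// Tokio-backed implementation, the sort of thing hyper-util could provide.
pub struct TokioTimer;

impl Timer for TokioTimer {
    fn sleep(&self, duration: Duration) -> Pin<Box<dyn Future<Output = ()> + Send>> {
        Box::pin(tokio::time::sleep(duration))
    }

    fn sleep_until(&self, deadline: Instant) -> Pin<Box<dyn Future<Output = ()> + Send>> {
        Box::pin(tokio::time::sleep_until(tokio::time::Instant::from_std(
            deadline,
        )))
    }
}
```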
### server
The higher-level `hyper::Server` struct, its related `Builder`, and the
`Accept` trait are all removed.
The `AddrStream` struct will be completely removed, as it provides no value but
causes binary bloat.
Similar to `client`, and as described in the *Design* section, the `conn` modules will
be expanded to support `http1` and `http2` submodules. An `auto` version can be
explored in `hyper-util`.
### service
A vendored and simplified `Service` trait will be explored.
For `Service`s used by a server, we will explore changing the error type from
any error to one that can be converted into a `hyper::error::Code`.
> **Unresolved**: Both of the above points are not set in stone. We will
> explore and decide if they are the best outcome during development.
The `MakeService` pieces will be removed.
### Cargo Features
Remove the `stream` feature. The `Stream` trait is not stable, and we cannot
depend on an unstable API.
Remove the `tcp` and `runtime` features. The automatic executor and timer parts
are handled by providing implementations of `Executor` and `Timer`. The
`connect` and `Accept` parts are also moving to `hyper-util`.
### Public Dependencies
- `http`
- `http-body`
- `bytes`
Cannot be public while "unstable":
- `tracing`
## `hyper-util`
### body
A channel implementation of `Body` that has an API to know when the data has
been successfully written is provided in `hyper_util::body::channel`.
### client
A `Pool` struct that implements `Service` is provided. It fills a similar role
as the previous `hyper::Client`.
> **Note**: The `Pool` might be something that goes into the `tower` crate
> instead. Or it might stay here as a slightly more specialized racing-connect
> pool. We'll find out as we go.
A `connect` submodule that mostly mirrors the existing `hyper::client::connect`
module is moved here. Connectors can be used as a source to provide `Service`s
used by the `Pool`.
### rt
We can provide Tokio-backed implementations of `Executor` and `Timer`.
### server
A `GracefulShutdown` helper is provided, to allow for similar style of graceful
shutdown as the previous `hyper::Server` did, but with better control.
# Appendix
## Unresolved Questions
There are some parts of the proposal which are not fully resolved. They are
mentioned in Design and API sections above, but also collected here for easy
finding. While they all have _plans_, they are more exploratory parts of the
API, and thus they have a higher possibility of changing as we implement them.
The goal is to have these questions resolved and removed from the document by
the time there is a [Release Candidate][timeline].
### Should there be `hyper::io` traits?
Depending on `tokio` just for `AsyncRead` and `AsyncWrite` is convenient, but
can be confusing for users integrating hyper with other runtimes. It also ties
our version directly to Tokio. We can consider having vendored traits, and
providing Tokio wrappers in `hyper-util`.
### Should returned body types be `impl Body`?
### How could the `Body` trait prepare for unknown frames?
We will experiment with this, and keep track of those experiments in a
dedicated issue. It might be possible to use something like this:
```rust
pub trait Body {
    type Data;

    fn poll_frame(..) -> Result<Option<Frame<Self::Data>>>;
}

pub struct Frame<T>(Kind<T>);

enum Kind<T> {
    Data(T),
    Trailers(HeaderMap),
    Unknown(Box<dyn FrameThingy>),
}
```
### Should there be a simplified `hyper::Service` trait, or should hyper depend on `tower-service`?
- There's still a few uncertain decisions around tower, such as if it should be
changed to `async fn call`, and if `poll_ready` is the best way to handle
backpressure.
- It's not clear that the backpressure is something needed at the `Server`
boundary, thus meaning we should remove `poll_ready` from hyper.
- It's not 100% clear if we should keep the service pattern, or use a
pull-based API. This will be explored in a future blog post.
## FAQ
### Why did you pick _that_ name? Why not this other better name?
Naming is hard. We certainly should solve it, but discussion for particular
names for structs and traits should be scoped to the specific issues. This
document is to define the shape of the library API.
### Should I publicly depend on `hyper-util`?
The `hyper-util` crate will not reach 1.0 when `hyper` does. Some types and
traits are being moved to `hyper-util`. As with any pre-1.0 crate, you _can_
publicly depend on it, but it is explicitly less stable.
In most cases, it's recommended to not publicly expose your dependency on
`hyper-util`. If you depend on a trait, such as used by the moved higher-level
`Client` or `Server`, it may be better for your users to define your own
abstraction, and then make an internal adapter.
### Isn't this making hyper harder?
We are making hyper more **flexible**. As noted in the [VISION][], most use
cases of hyper require it to be flexible. That _can_ mean that the exposed API
is lower level, and that it feels more complicated. It should still be
**understandable**.
But the hyper 1.0 effort is more than just the single `hyper` crate. Many
useful helpers will be migrated to a `hyper-util` crate, and likely improved in
the process. The [timeline][] also points out that we will have a significant
documentation push. While the flexible pieces will be in hyper to compose how
they need, we will also write guides for the [hyper.rs][] showing people how to
accomplish the most common tasks.
[timeline]: https://seanmonstar.com/post/676912131372875776/hyper-10-timeline
[VISION]: https://github.com/hyperium/hyper/pull/2772
[hyper.rs]: https://hyper.rs

docs/TENETS.md (new file, 100 lines)
View File

@@ -0,0 +1,100 @@
# Charter
> hyper is a protective and efficient HTTP library for all.
# Tenets
Tenets are guiding principles. They guide how decisions are made for the whole
project. Ideally, we do all of them all the time. In some cases, though, we may
be forced to decide between slightly penalizing one goal or another. In that
case, we tend to support those goals that come earlier in the list over those
that come later (but every case is different).
## 0. Open
hyper is open source, always. The success of hyper depends on the health of the
community building and using it. All contributions are in the open. We don't
maintain private versions, and don't include features that aren't useful to
others.
[We prioritize kindness][CONDUCT], compassion and empathy towards all
contributors. Technical skill is not a substitute for human decency.
[CONDUCT]: https://github.com/hyperium/hyper/blob/master/docs/CODE_OF_CONDUCT.md
### Examples
It's not usually hard for an open source library to stay open and also meet its
other priorities. Here's some instances where being **Open** would be more
important than **Correct** or **Fast**:
- Say an individual were to bring forward a contribution that makes hyper more
correct, or faster, perhaps fixing some serious bug. But in doing so, they
also insulted people, harassed other contributors or users, or shamed
everyone for the previous code. They felt their contribution was "invaluable".
We would not accept such a contribution, instead banning the user and
rewriting the code amongst the kind collaborators of the project.
- Say someone brings a contribution that adds a new feature useful for
performance or correctness, but their work accomplishes this by integrating
hyper with a proprietary library. We would not accept such a contribution,
because we don't want such a feature limited only to those users willing to
compromise openness, and we don't want to bifurcate the ecosystem between those
who make that compromise and those who don't.
## 1. Correct
hyper is a memory safe and precise implementation of the HTTP specification.
Memory safety is vital in a core Internet technology. Following the HTTP
specifications correctly protects users. It makes the software durable to the
“real world”. Where feasible, hyper enforces correct usage.
This is more than just "don't write bugs". hyper actively protects the user.
### Examples
- Even though we follow the **HTTP/\*** specs, hyper doesn't blindly implement
everything without considering if it is safe to do so.
## 2. Fast
A fast experience delights users. A faster network library means a faster
application, resulting in delighting our users. Whether with one request,
or millions.
Being _fast_ means we improve throughput, drive down CPU usage, and improve
sustainability.
Fast _enough_. We don't sacrifice sanity for speed.
## 3. HTTP/*
hyper is specifically focused on HTTP. Supporting new HTTP versions is in scope,
but supporting separate protocols is not.
This also defines what the abstraction layer is: the API is designed around
sending and receiving HTTP messages.
## 4. Flexible
hyper enables as many usecases as possible. It has no opinion on application
structure, and makes few assumptions about its environment. This includes being
portable to different operating systems.
### Examples
- While we choose safer defaults to be **Correct**, hyper includes options to
_allow_ different behavior, when the user requires them.
- Providing choice usually makes things more complex, so being **Flexible** does
mean it's less _easy_. That can sometimes conflict with the simplest way of making
hyper **Understandable**.
## 5. Understandable
hyper is [no more complicated than it has to
be](https://en.wikipedia.org/wiki/Occam%27s_razor). HTTP is not simple. It may
not be as "easy" as 1-line to do everything, but it shouldn't be "hard" to find
the answers.
From logical and misuse-resistant APIs, to stellar documentation, to transparent
metrics.

docs/VISION.md (new file, 230 lines)
View File

@@ -0,0 +1,230 @@
# hyper Vision
## Purpose
This is an overview of what the shape of hyper looks like, but also somewhat
zoomed out, so that the _vision_ can survive while the exact minute details
might shift and change over time.
### Charter
> hyper is a protective and efficient HTTP library for all.
### Tenets
Tenets are guiding principles. They guide how decisions are made for the whole
project. Ideally, we do all of them all the time. In some cases, though, we may
be forced to decide between slightly penalizing one goal or another. In that
case, we tend to support those goals that come earlier in the list over those
that come later (but every case is different).
0. Open
1. Correct
2. Fast
3. HTTP/\*
4. Flexible
5. Understandable
There's a lot more detail about each in [TENETS](./TENETS.md).
## Use Cases
Who are the *users* of hyper? How would they use hyper?
### Low-Level Client Library (curl, reqwest, aws-sdk)
These client libraries care that hyper is **Flexible**, since they are
expressing their own opinion on how a more-featured HTTP client should act.
This includes opinions on connection establishment, management, pooling, HTTP
version options, and even runtimes.
curl's main reason for using hyper is that it is **Safe**.
### Web Server Frameworks (deno, axum)
These are using hyper's server feature to expose a different, higher-level API
to users. Besides the obvious requirements, these require that hyper is
**Fast**. Servers are costly, handling more requests faster is important to
them.
That hyper is **Flexible** is also important, in that it needs to be flexible
enough for them to build a server framework, and allow them to express their
own opinions about API to their users.
### Services and Proxies (linkerd, cloudflare, fastly)
These are using hyper directly, likely both the client and server, in order to
build efficient and powerful services, applications, and tools for their end
users. They care greatly that hyper is **Correct**, since web traffic can
stretch the limits of what is valid HTTP, and exercise less-common parts of the
specifications.
They also require hyper to be **Fast**, for similar reasons that the web server
frameworks do.
### New Rust Web Developers
These are developers who are either new to Rust, or new to web servers, and
have reached for hyper to start with.
It's likely that these users don't have strong opinions about how an HTTP
server or client should work, just that it _should_ handle all the things they
normally assume it would. For these users, it would be best to quickly help
them compare their own expectations with hyper's capabilities, and perhaps
suggest reaching for higher-level, _easier_ libraries instead.
Those that stick around after that recommendation are users that wish both to
learn at a lower level, and to pick and choose what batteries they plug in to
hyper as they move along. While they do care about the other tenets, that hyper
is **Understandable** is of extra importance to them.
## The Library
So with all that context in mind, what does hyper, the library, actually look
like? This doesn't highlight what _is_ and _isn't_ present. What currently
needs to change to reach this vision is left to individual version roadmaps.
### Layers
In all cases, a user brings their own runtime and IO to work with hyper. The IO
is provided to hyper, and hyper acts on top of it. hyper returns `Future`s that
the user then decides how to poll, likely involving their runtime options.
![architecture diagram](./vision-arch.svg)
#### Protocol Codecs
hyper has dedicated codecs for the major HTTP versions. Each is internally
designed to be **Correct** and **Fast** when it comes to encoding and decoding.
The individual codecs may be implemented as sub-crates, with a less-stable
promise, to support the **Flexible** needs of some users who wish to build
their own connection management, or customize encoding and decoding beyond what
is officially supported.
#### Connection State Management
A **Correct** implementation includes more than just enforcing certain
characters when encoding and decoding. Order of frames, and flags in certain
frames can affect the state of the connection. Some examples of things enforced
at this layer:
- If a message has a `content-length`, enforce only that many bytes are read or
written.
- Reading a `Response` before a `Request` is even written implies a mismatched
reply that should be interpreted as an error.
- The presence of some headers, such as `Connection: close`, or the absence of
others, such as `content-length` and `transfer-encoding`, can mean that the
connection should terminate after the current message.
- HTTP/2 and HTTP/3 may send connection-level frames that don't pertain to any
specific transaction, and must be read and handled regardless of if a user is
currently checking for a message.
#### HTTP Role and Version Abstraction
This is the public API layer. Methods exposed are around sending and receiving
`http::Request`s and `http::Response`s, not around framing specifics of the
different versions. These are built around a client or server `Connection`
interface.
By exposing this layer publicly, we take care of the **Correct** tenet, by not
forcing the user to send the specific frames themselves. The API should be
designed in a way that a user cannot easily (if at all) create an _incorrect_
HTTP connection.
Motivated by the **Flexible** tenet, there _are_ version-specific options that
can be configured at this level, and version-specific functionality can usually
be handled via `http::Extensions`.
### Not quite stable, but utile (useful)
Beyond what is directly in the hyper crate, there are useful (utile) parts that
may not meet hyper's stability promise. Developing, experimenting, and exposing
those parts is the purpose of the `hyper-util` crate. That crate does not have
the same stability level as hyper. However, the goal is that things that other
libraries might want to expose as a public dependency do not live in
`hyper-util` forever, but rather stabilize and get promoted into `hyper`.
Exactly what gets put into `hyper-util` presently is kept in the roadmap
documents.
### Stability Promise
What even is hyper's stability promise? Does it mean we are "done"? No. Will we
ever make breaking changes again? Probably. We'll still follow [semantic
versioning](https://semver.org).
Prior to 1.0, hyper has already only done breaking changes once a year. So 1
year isn't much of a promise. We'll have significantly more use and understanding
after a few years, and that could prompt some redesign.
As of this writing, we'll promise that _major_ versions of hyper are stable for
3 years. New features will come out in _minor_ versions frequently. If it is
determined necessary to make breaking changes to the API, we'll save them for
after the 3 years.
hyper also establishes a Minimum Supported Rust Version (MSRV). hyper will
support Rust versions at least 6 months old. If a new Rust version is released
with a feature hyper wishes to use, we won't do so until at least 6 months
afterwards. hyper will only ever require a new Rust version as a _minor_
release (1.x), not as a patch (1.x.y).
## Security
The security of hyper is a large part of what makes hyper _protective_. We make
hyper secure via the combined efforts of being **Correct**, focusing on
**HTTP/\***, and making it all **Understandable**.
### Memory Safety
Being **Correct** requires that hyper be memory-safe. Using the Rust language
gets us most of the way there. But there is the ability to write `unsafe`
Rust. Does being **Correct** mean that we can _never_ write `unsafe` code
anywhere? Even if it helps make hyper **Fast**? We can, carefully.
How do we balance the two, so that hyper is secure?
hyper prefers not to have large modules of intertwined `unsafe` code. hyper
does allow small `unsafe` blocks, no more than a few lines, where it's easier
to verify that the `unsafe` code was written **Correctly**.
### Meticulous Testing
hyper's test suite grows and grows. There's a lot that needs to be right.
Parsers, encoders, state machines. When easily isolated, those pieces have
internal unit tests. But hyper also keeps a large list of growing integration
tests that make sure all the parts are **Correct**.
Making writing new tests easy is a high priority. Investing in the testing
infrastructure is a proven way to make sure hyper stays **Correct** and secure.
### Constant Fuzzing
One thing is to know specific cases to test for. But we can't know all the
inputs or states that *might* cause a bug. That's why hyper has rounds of
fuzzing built into its CI. It's also why hyper signs up for and uses resources
to provide *constant*, around-the-clock fuzzing, always looking for something
that hyper should be hardened against.
### Security Process
hyper has an outlined
[SECURITY](https://github.com/hyperium/hyper/blob/master/SECURITY.md) process,
so we can safely report and fix issues.
## Non-goals
After writing this up, it is easier to articulate what sorts of things many
might associate with an HTTP library, but which are explicitly *not* for hyper.
These are all things that are definitely **out of scope**.
- TLS: We learned early that bundling TLS directly in hyper [has
problems](https://github.com/hyperium/hyper/issues/985). People also have
very strong opinions about which TLS implementation to use. The design of
hyper allows users to bring their own TLS.
- Routing
- Cookies
- Not-HTTP: WebSockets, or other protocols that are built next to HTTP. It
should be possible to _use_ hyper to upgrade, but the actual next-protocol
should be handled by a different library.

docs/vision-arch.svg (new file)

File diff suppressed because one or more lines are too long


View File

@@ -1,6 +1,5 @@
#![deny(warnings)]
use futures_util::TryStreamExt;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Method, Request, Response, Server, StatusCode};
@@ -16,16 +15,17 @@ async fn echo(req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
// Simply echo the body back to the client.
(&Method::POST, "/echo") => Ok(Response::new(req.into_body())),
// TODO: Fix this, broken in PR #2896
// Convert to uppercase before sending back to client using a stream.
(&Method::POST, "/echo/uppercase") => {
let chunk_stream = req.into_body().map_ok(|chunk| {
chunk
.iter()
.map(|byte| byte.to_ascii_uppercase())
.collect::<Vec<u8>>()
});
Ok(Response::new(Body::wrap_stream(chunk_stream)))
}
// (&Method::POST, "/echo/uppercase") => {
// let chunk_stream = req.into_body().map_ok(|chunk| {
// chunk
// .iter()
// .map(|byte| byte.to_ascii_uppercase())
// .collect::<Vec<u8>>()
// });
// Ok(Response::new(Body::wrap_stream(chunk_stream)))
// }
// Reverse the entire body before sending back to the client.
//

View File

@@ -1,9 +1,5 @@
#![deny(warnings)]
use tokio::fs::File;
use tokio_util::codec::{BytesCodec, FramedRead};
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Method, Request, Response, Result, Server, StatusCode};
@@ -48,11 +44,8 @@ fn not_found() -> Response<Body> {
}
async fn simple_file_send(filename: &str) -> Result<Response<Body>> {
// Serve a file by asynchronously reading it by chunks using tokio-util crate.
if let Ok(file) = File::open(filename).await {
let stream = FramedRead::new(file, BytesCodec::new());
let body = Body::wrap_stream(stream);
if let Ok(contents) = tokio::fs::read(filename).await {
let body = contents.into();
return Ok(Response::new(body));
}
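
The change above trades the chunked `FramedRead` stream for reading the whole file into memory. If streaming is still wanted without the `stream` feature, a channel-backed `Body` is one possible substitute; this is a hedged sketch assuming `Body::channel` is available and a Tokio runtime is running.

```rust
use hyper::Body;
use tokio::fs::File;
use tokio::io::AsyncReadExt;

async fn stream_file(filename: &str) -> std::io::Result<Body> {
    let mut file = File::open(filename).await?;
    let (mut tx, body) = Body::channel();
    tokio::spawn(async move {
        let mut buf = vec![0u8; 8 * 1024];
        loop {
            match file.read(&mut buf).await {
                Ok(0) => break, // EOF
                Ok(n) => {
                    // Stop if the receiving side of the body went away.
                    if tx.send_data(buf[..n].to_vec().into()).await.is_err() {
                        break;
                    }
                }
                Err(_) => {
                    // Signal an aborted body to the peer.
                    tx.abort();
                    break;
                }
            }
        }
    });
    Ok(body)
}
```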

View File

@@ -1,7 +1,6 @@
#![deny(warnings)]
use bytes::Buf;
use futures_util::{stream, StreamExt};
use hyper::client::HttpConnector;
use hyper::service::{make_service_fn, service_fn};
use hyper::{header, Body, Client, Method, Request, Response, Server, StatusCode};
@@ -24,18 +23,10 @@ async fn client_request_response(client: &Client<HttpConnector>) -> Result<Respo
.unwrap();
let web_res = client.request(req).await?;
// Compare the JSON we sent (before) with what we received (after):
let before = stream::once(async {
Ok(format!(
"<b>POST request body</b>: {}<br><b>Response</b>: ",
POST_DATA,
)
.into())
});
let after = web_res.into_body();
let body = Body::wrap_stream(before.chain(after));
Ok(Response::new(body))
let res_body = web_res.into_body();
Ok(Response::new(res_body))
}
async fn api_post_response(req: Request<Body>) -> Result<Response<Body>> {

View File

@@ -1,20 +1,14 @@
use std::borrow::Cow;
#[cfg(feature = "stream")]
use std::error::Error as StdError;
use std::fmt;
use bytes::Bytes;
use futures_channel::mpsc;
use futures_channel::oneshot;
use futures_core::Stream; // for mpsc::Receiver
#[cfg(feature = "stream")]
use futures_util::TryStreamExt;
use http::HeaderMap;
use http_body::{Body as HttpBody, SizeHint};
use super::DecodedLength;
#[cfg(feature = "stream")]
use crate::common::sync_wrapper::SyncWrapper;
use crate::common::Future;
#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))]
use crate::common::Never;
@@ -56,12 +50,6 @@ enum Kind {
},
#[cfg(feature = "ffi")]
Ffi(crate::ffi::UserBody),
#[cfg(feature = "stream")]
Wrapped(
SyncWrapper<
Pin<Box<dyn Stream<Item = Result<Bytes, Box<dyn StdError + Send + Sync>>> + Send>>,
>,
),
}
struct Extra {
@@ -164,39 +152,6 @@ impl Body {
(tx, rx)
}
/// Wrap a futures `Stream` in a box inside `Body`.
///
/// # Example
///
/// ```
/// # use hyper::Body;
/// let chunks: Vec<Result<_, std::io::Error>> = vec![
/// Ok("hello"),
/// Ok(" "),
/// Ok("world"),
/// ];
///
/// let stream = futures_util::stream::iter(chunks);
///
/// let body = Body::wrap_stream(stream);
/// ```
///
/// # Optional
///
/// This function requires enabling the `stream` feature in your
/// `Cargo.toml`.
#[cfg(feature = "stream")]
#[cfg_attr(docsrs, doc(cfg(feature = "stream")))]
pub fn wrap_stream<S, O, E>(stream: S) -> Body
where
S: Stream<Item = Result<O, E>> + Send + 'static,
O: Into<Bytes> + 'static,
E: Into<Box<dyn StdError + Send + Sync>> + 'static,
{
let mapped = stream.map_ok(Into::into).map_err(Into::into);
Body::new(Kind::Wrapped(SyncWrapper::new(Box::pin(mapped))))
}
fn new(kind: Kind) -> Body {
Body { kind, extra: None }
}
@@ -329,12 +284,6 @@ impl Body {
#[cfg(feature = "ffi")]
Kind::Ffi(ref mut body) => body.poll_data(cx),
#[cfg(feature = "stream")]
Kind::Wrapped(ref mut s) => match ready!(s.get_mut().as_mut().poll_next(cx)) {
Some(res) => Poll::Ready(Some(res.map_err(crate::Error::new_body))),
None => Poll::Ready(None),
},
}
}
@@ -405,8 +354,6 @@ impl HttpBody for Body {
Kind::H2 { recv: ref h2, .. } => h2.is_end_stream(),
#[cfg(feature = "ffi")]
Kind::Ffi(..) => false,
#[cfg(feature = "stream")]
Kind::Wrapped(..) => false,
}
}
@@ -426,8 +373,6 @@ impl HttpBody for Body {
match self.kind {
Kind::Once(Some(ref val)) => SizeHint::with_exact(val.len() as u64),
Kind::Once(None) => SizeHint::with_exact(0),
#[cfg(feature = "stream")]
Kind::Wrapped(..) => SizeHint::default(),
Kind::Chan { content_length, .. } => opt_len!(content_length),
#[cfg(all(feature = "http2", any(feature = "client", feature = "server")))]
Kind::H2 { content_length, .. } => opt_len!(content_length),
@@ -457,33 +402,6 @@ impl fmt::Debug for Body {
}
}
/// # Optional
///
/// This function requires enabling the `stream` feature in your
/// `Cargo.toml`.
#[cfg(feature = "stream")]
impl Stream for Body {
type Item = crate::Result<Bytes>;
fn poll_next(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Option<Self::Item>> {
HttpBody::poll_data(self, cx)
}
}
/// # Optional
///
/// This function requires enabling the `stream` feature in your
/// `Cargo.toml`.
#[cfg(feature = "stream")]
impl From<Box<dyn Stream<Item = Result<Bytes, Box<dyn StdError + Send + Sync>>> + Send>> for Body {
#[inline]
fn from(
stream: Box<dyn Stream<Item = Result<Bytes, Box<dyn StdError + Send + Sync>>> + Send>,
) -> Body {
Body::new(Kind::Wrapped(SyncWrapper::new(stream.into())))
}
}
impl From<Bytes> for Body {
#[inline]
fn from(chunk: Bytes) -> Body {

View File

@@ -17,7 +17,7 @@ use super::HttpBody;
/// # Example
///
/// ```
/// # #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))]
/// # #[cfg(all(feature = "client", feature = "tcp", any(feature = "http1", feature = "http2")))]
/// # async fn doc() -> hyper::Result<()> {
/// use hyper::{body::HttpBody};
///

504
src/client/conn/http1.rs Normal file
View File

@@ -0,0 +1,504 @@
//! HTTP/1 client connections
use std::error::Error as StdError;
use std::fmt;
use std::sync::Arc;
use http::{Request, Response};
use httparse::ParserConfig;
use tokio::io::{AsyncRead, AsyncWrite};
use crate::Body;
use crate::body::HttpBody;
use crate::common::{
exec::{BoxSendFuture, Exec},
task, Future, Pin, Poll,
};
use crate::upgrade::Upgraded;
use crate::proto;
use crate::rt::Executor;
use super::super::dispatch;
type Dispatcher<T, B> =
proto::dispatch::Dispatcher<proto::dispatch::Client<B>, B, T, proto::h1::ClientTransaction>;
/// The sender side of an established connection.
pub struct SendRequest<B> {
dispatch: dispatch::Sender<Request<B>, Response<Body>>,
}
/// A future that processes all HTTP state for the IO object.
///
/// In most cases, this should just be spawned into an executor, so that it
/// can process incoming and outgoing messages, notice hangups, and the like.
#[must_use = "futures do nothing unless polled"]
pub struct Connection<T, B>
where
T: AsyncRead + AsyncWrite + Send + 'static,
B: HttpBody + 'static,
{
inner: Option<Dispatcher<T, B>>,
}
/// A builder to configure an HTTP connection.
///
/// After setting options, the builder is used to create a handshake future.
#[derive(Clone, Debug)]
pub struct Builder {
pub(super) exec: Exec,
h09_responses: bool,
h1_parser_config: ParserConfig,
h1_writev: Option<bool>,
h1_title_case_headers: bool,
h1_preserve_header_case: bool,
#[cfg(feature = "ffi")]
h1_preserve_header_order: bool,
h1_read_buf_exact_size: Option<usize>,
h1_max_buf_size: Option<usize>,
}
/// Returns a handshake future over some IO.
///
/// This is a shortcut for `Builder::new().handshake(io)`.
/// See [`client::conn`](crate::client::conn) for more.
pub async fn handshake<T>(
io: T,
) -> crate::Result<(SendRequest<crate::Body>, Connection<T, crate::Body>)>
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
{
Builder::new().handshake(io).await
}
// ===== impl SendRequest
impl<B> SendRequest<B> {
/// Polls to determine whether this sender can be used yet for a request.
///
/// If the associated connection is closed, this returns an Error.
pub fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
self.dispatch.poll_ready(cx)
}
/*
pub(super) async fn when_ready(self) -> crate::Result<Self> {
let mut me = Some(self);
future::poll_fn(move |cx| {
ready!(me.as_mut().unwrap().poll_ready(cx))?;
Poll::Ready(Ok(me.take().unwrap()))
})
.await
}
pub(super) fn is_ready(&self) -> bool {
self.dispatch.is_ready()
}
pub(super) fn is_closed(&self) -> bool {
self.dispatch.is_closed()
}
*/
}
impl<B> SendRequest<B>
where
B: HttpBody + 'static,
{
/// Sends a `Request` on the associated connection.
///
/// Returns a future that if successful, yields the `Response`.
///
/// # Note
///
/// There are some key differences in what automatic things the `Client`
/// does for you that will not be done here:
///
/// - `Client` requires absolute-form `Uri`s, since the scheme and
/// authority are needed to connect. They aren't required here.
/// - Since the `Client` requires absolute-form `Uri`s, it can add
/// the `Host` header based on it. You must add a `Host` header yourself
/// before calling this method.
/// - Since absolute-form `Uri`s are not required, if received, they will
/// be serialized as-is.
///
/// # Example
///
/// ```
/// # use http::header::HOST;
/// # use hyper::client::conn::SendRequest;
/// # use hyper::Body;
/// use hyper::Request;
///
/// # async fn doc(mut tx: SendRequest<Body>) -> hyper::Result<()> {
/// // build a Request
/// let req = Request::builder()
/// .uri("/foo/bar")
/// .header(HOST, "hyper.rs")
/// .body(Body::empty())
/// .unwrap();
///
/// // send it and await a Response
/// let res = tx.send_request(req).await?;
/// // assert the Response
/// assert!(res.status().is_success());
/// # Ok(())
/// # }
/// # fn main() {}
/// ```
pub fn send_request(&mut self, req: Request<B>) -> impl Future<Output = crate::Result<Response<Body>>> {
let sent = self.dispatch.send(req);
async move {
match sent {
Ok(rx) => match rx.await {
Ok(Ok(resp)) => Ok(resp),
Ok(Err(err)) => Err(err),
// this is a definite bug if it happens, but it shouldn't happen!
Err(_canceled) => panic!("dispatch dropped without returning error"),
}
Err(_req) => {
tracing::debug!("connection was not ready");
Err(crate::Error::new_canceled().with("connection was not ready"))
}
}
}
}
/*
pub(super) fn send_request_retryable(
&mut self,
req: Request<B>,
) -> impl Future<Output = Result<Response<Body>, (crate::Error, Option<Request<B>>)>> + Unpin
where
B: Send,
{
match self.dispatch.try_send(req) {
Ok(rx) => {
Either::Left(rx.then(move |res| {
match res {
Ok(Ok(res)) => future::ok(res),
Ok(Err(err)) => future::err(err),
// this is definite bug if it happens, but it shouldn't happen!
Err(_) => panic!("dispatch dropped without returning error"),
}
}))
}
Err(req) => {
tracing::debug!("connection was not ready");
let err = crate::Error::new_canceled().with("connection was not ready");
Either::Right(future::err((err, Some(req))))
}
}
}
*/
}
impl<B> fmt::Debug for SendRequest<B> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SendRequest").finish()
}
}
// ===== impl Connection
impl<T, B> fmt::Debug for Connection<T, B>
where
T: AsyncRead + AsyncWrite + fmt::Debug + Send + 'static,
B: HttpBody + 'static,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Connection").finish()
}
}
impl<T, B> Future for Connection<T, B>
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
B: HttpBody + Send + 'static,
B::Data: Send,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
type Output = crate::Result<()>;
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
match ready!(Pin::new(self.inner.as_mut().unwrap()).poll(cx))? {
proto::Dispatched::Shutdown => Poll::Ready(Ok(())),
proto::Dispatched::Upgrade(pending) => match self.inner.take() {
Some(h1) => {
let (io, buf, _) = h1.into_inner();
pending.fulfill(Upgraded::new(io, buf));
Poll::Ready(Ok(()))
}
_ => {
drop(pending);
unreachable!("Upgraded twice");
}
},
}
}
}
// ===== impl Builder
impl Builder {
/// Creates a new connection builder.
#[inline]
pub fn new() -> Builder {
Builder {
exec: Exec::Default,
h09_responses: false,
h1_writev: None,
h1_read_buf_exact_size: None,
h1_parser_config: Default::default(),
h1_title_case_headers: false,
h1_preserve_header_case: false,
#[cfg(feature = "ffi")]
h1_preserve_header_order: false,
h1_max_buf_size: None,
}
}
/// Provide an executor to execute background tasks for this connection.
pub fn executor<E>(&mut self, exec: E) -> &mut Builder
where
E: Executor<BoxSendFuture> + Send + Sync + 'static,
{
self.exec = Exec::Executor(Arc::new(exec));
self
}
/// Set whether HTTP/0.9 responses should be tolerated.
///
/// Default is false.
pub fn http09_responses(&mut self, enabled: bool) -> &mut Builder {
self.h09_responses = enabled;
self
}
/// Set whether HTTP/1 connections will accept spaces between header names
/// and the colon that follow them in responses.
///
/// You probably don't need this; here is what [RFC 7230 Section 3.2.4.] has
/// to say about it:
///
/// > No whitespace is allowed between the header field-name and colon. In
/// > the past, differences in the handling of such whitespace have led to
/// > security vulnerabilities in request routing and response handling. A
/// > server MUST reject any received request message that contains
/// > whitespace between a header field-name and colon with a response code
/// > of 400 (Bad Request). A proxy MUST remove any such whitespace from a
/// > response message before forwarding the message downstream.
///
/// Note that this setting does not affect HTTP/2.
///
/// Default is false.
///
/// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4
pub fn http1_allow_spaces_after_header_name_in_responses(
&mut self,
enabled: bool,
) -> &mut Builder {
self.h1_parser_config
.allow_spaces_after_header_name_in_responses(enabled);
self
}
/// Set whether HTTP/1 connections will accept obsolete line folding for
/// header values.
///
/// Newline codepoints (`\r` and `\n`) will be transformed to spaces when
/// parsing.
///
/// You probably don't need this; here is what [RFC 7230 Section 3.2.4.] has
/// to say about it:
///
/// > A server that receives an obs-fold in a request message that is not
/// > within a message/http container MUST either reject the message by
/// > sending a 400 (Bad Request), preferably with a representation
/// > explaining that obsolete line folding is unacceptable, or replace
/// > each received obs-fold with one or more SP octets prior to
/// > interpreting the field value or forwarding the message downstream.
///
/// > A proxy or gateway that receives an obs-fold in a response message
/// > that is not within a message/http container MUST either discard the
/// > message and replace it with a 502 (Bad Gateway) response, preferably
/// > with a representation explaining that unacceptable line folding was
/// > received, or replace each received obs-fold with one or more SP
/// > octets prior to interpreting the field value or forwarding the
/// > message downstream.
///
/// > A user agent that receives an obs-fold in a response message that is
/// > not within a message/http container MUST replace each received
/// > obs-fold with one or more SP octets prior to interpreting the field
/// > value.
///
/// Note that this setting does not affect HTTP/2.
///
/// Default is false.
///
/// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4
pub fn http1_allow_obsolete_multiline_headers_in_responses(
&mut self,
enabled: bool,
) -> &mut Builder {
self.h1_parser_config
.allow_obsolete_multiline_headers_in_responses(enabled);
self
}
/// Set whether HTTP/1 connections should try to use vectored writes,
/// or always flatten into a single buffer.
///
/// Note that setting this to false may mean more copies of body data,
/// but may also improve performance when an IO transport doesn't
/// support vectored writes well, such as most TLS implementations.
///
/// Setting this to true will force hyper to use a queued write strategy,
/// which may eliminate unnecessary cloning on some TLS backends.
///
/// Default is `auto`. In this mode hyper will try to guess which mode to
/// use.
pub fn http1_writev(&mut self, enabled: bool) -> &mut Builder {
self.h1_writev = Some(enabled);
self
}
/// Set whether HTTP/1 connections will write header names as title case at
/// the socket level.
///
/// Note that this setting does not affect HTTP/2.
///
/// Default is false.
pub fn http1_title_case_headers(&mut self, enabled: bool) -> &mut Builder {
self.h1_title_case_headers = enabled;
self
}
/// Set whether to support preserving original header cases.
///
/// Currently, this will record the original cases received, and store them
/// in a private extension on the `Response`. It will also look for and use
/// such an extension in any provided `Request`.
///
/// Since the relevant extension is still private, there is no way to
/// interact with the original cases. The only effect this can have now is
/// to forward the cases in a proxy-like fashion.
///
/// Note that this setting does not affect HTTP/2.
///
/// Default is false.
pub fn http1_preserve_header_case(&mut self, enabled: bool) -> &mut Builder {
self.h1_preserve_header_case = enabled;
self
}
/// Set whether to support preserving original header order.
///
/// Currently, this will record the order in which headers are received, and store this
/// ordering in a private extension on the `Response`. It will also look for and use
/// such an extension in any provided `Request`.
///
/// Note that this setting does not affect HTTP/2.
///
/// Default is false.
#[cfg(feature = "ffi")]
pub fn http1_preserve_header_order(&mut self, enabled: bool) -> &mut Builder {
self.h1_preserve_header_order = enabled;
self
}
/// Sets the exact size of the read buffer to *always* use.
///
/// Note that setting this option unsets the `http1_max_buf_size` option.
///
/// Default is an adaptive read buffer.
pub fn http1_read_buf_exact_size(&mut self, sz: Option<usize>) -> &mut Builder {
self.h1_read_buf_exact_size = sz;
self.h1_max_buf_size = None;
self
}
/// Set the maximum buffer size for the connection.
///
/// Default is ~400kb.
///
/// Note that setting this option unsets the `http1_read_buf_exact_size` option.
///
/// # Panics
///
/// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum.
#[cfg(feature = "http1")]
#[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub fn http1_max_buf_size(&mut self, max: usize) -> &mut Self {
assert!(
max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE,
"the max_buf_size cannot be smaller than the minimum that h1 specifies."
);
self.h1_max_buf_size = Some(max);
self.h1_read_buf_exact_size = None;
self
}
/// Constructs a connection with the configured options and IO.
/// See [`client::conn`](crate::client::conn) for more.
///
/// Note, if [`Connection`] is not `await`-ed, [`SendRequest`] will
/// do nothing.
pub fn handshake<T, B>(
&self,
io: T,
) -> impl Future<Output = crate::Result<(SendRequest<B>, Connection<T, B>)>>
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
B: HttpBody + 'static,
B::Data: Send,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
let opts = self.clone();
async move {
tracing::trace!("client handshake HTTP/1");
let (tx, rx) = dispatch::channel();
let mut conn = proto::Conn::new(io);
conn.set_h1_parser_config(opts.h1_parser_config);
if let Some(writev) = opts.h1_writev {
if writev {
conn.set_write_strategy_queue();
} else {
conn.set_write_strategy_flatten();
}
}
if opts.h1_title_case_headers {
conn.set_title_case_headers();
}
if opts.h1_preserve_header_case {
conn.set_preserve_header_case();
}
#[cfg(feature = "ffi")]
if opts.h1_preserve_header_order {
conn.set_preserve_header_order();
}
if opts.h09_responses {
conn.set_h09_responses();
}
if let Some(sz) = opts.h1_read_buf_exact_size {
conn.set_read_buf_exact_size(sz);
}
if let Some(max) = opts.h1_max_buf_size {
conn.set_max_buf_size(max);
}
let cd = proto::h1::dispatch::Client::new(rx);
let proto = proto::h1::Dispatcher::new(cd, conn);
Ok((
SendRequest { dispatch: tx },
Connection { inner: Some(proto) },
))
}
}
}
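
A quick end-to-end sketch of the new `client::conn::http1` API defined above. This is an editorial illustration, not part of the file: it assumes the `http1` feature, a Tokio runtime, and a reachable server; `example.com` and port 80 are placeholders.

```rust
use hyper::client::conn::http1;
use hyper::{header, Body, Request};
use tokio::net::TcpStream;

async fn fetch_root() -> hyper::Result<()> {
    let io = TcpStream::connect("example.com:80").await.expect("connect");
    let (mut tx, conn) = http1::handshake(io).await?;

    // The Connection future drives all HTTP state; it must be polled
    // (typically by spawning it) or SendRequest will never make progress.
    tokio::spawn(async move {
        if let Err(err) = conn.await {
            eprintln!("connection error: {}", err);
        }
    });

    // Unlike the higher-level Client, the Host header must be added manually.
    let req = Request::builder()
        .uri("/")
        .header(header::HOST, "example.com")
        .body(Body::empty())
        .unwrap();

    let res = tx.send_request(req).await?;
    println!("status: {}", res.status());
    Ok(())
}
```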

432
src/client/conn/http2.rs Normal file
View File

@@ -0,0 +1,432 @@
//! HTTP/2 client connections
use std::error::Error as StdError;
use std::fmt;
use std::marker::PhantomData;
use std::sync::Arc;
#[cfg(feature = "runtime")]
use std::time::Duration;
use http::{Request, Response};
use tokio::io::{AsyncRead, AsyncWrite};
use crate::Body;
use crate::body::HttpBody;
use crate::common::{
exec::{BoxSendFuture, Exec},
task, Future, Pin, Poll,
};
use crate::proto;
use crate::rt::Executor;
use super::super::dispatch;
/// The sender side of an established connection.
pub struct SendRequest<B> {
dispatch: dispatch::Sender<Request<B>, Response<Body>>,
}
/// A future that processes all HTTP state for the IO object.
///
/// In most cases, this should just be spawned into an executor, so that it
/// can process incoming and outgoing messages, notice hangups, and the like.
#[must_use = "futures do nothing unless polled"]
pub struct Connection<T, B>
where
T: AsyncRead + AsyncWrite + Send + 'static,
B: HttpBody + 'static,
{
inner: (PhantomData<T>, proto::h2::ClientTask<B>),
}
/// A builder to configure an HTTP connection.
///
/// After setting options, the builder is used to create a handshake future.
#[derive(Clone, Debug)]
pub struct Builder {
pub(super) exec: Exec,
h2_builder: proto::h2::client::Config,
}
/// Returns a handshake future over some IO.
///
/// This is a shortcut for `Builder::new().handshake(io)`.
/// See [`client::conn`](crate::client::conn) for more.
pub async fn handshake<T>(
io: T,
) -> crate::Result<(SendRequest<crate::Body>, Connection<T, crate::Body>)>
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
{
Builder::new().handshake(io).await
}
// ===== impl SendRequest
impl<B> SendRequest<B> {
/// Polls to determine whether this sender can be used yet for a request.
///
/// If the associated connection is closed, this returns an Error.
pub fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
self.dispatch.poll_ready(cx)
}
/*
pub(super) async fn when_ready(self) -> crate::Result<Self> {
let mut me = Some(self);
future::poll_fn(move |cx| {
ready!(me.as_mut().unwrap().poll_ready(cx))?;
Poll::Ready(Ok(me.take().unwrap()))
})
.await
}
pub(super) fn is_ready(&self) -> bool {
self.dispatch.is_ready()
}
pub(super) fn is_closed(&self) -> bool {
self.dispatch.is_closed()
}
*/
}
impl<B> SendRequest<B>
where
B: HttpBody + 'static,
{
/// Sends a `Request` on the associated connection.
///
/// Returns a future that if successful, yields the `Response`.
///
/// # Note
///
/// There are some key differences in what automatic things the `Client`
/// does for you that will not be done here:
///
/// - `Client` requires absolute-form `Uri`s, since the scheme and
/// authority are needed to connect. They aren't required here.
/// - Since the `Client` requires absolute-form `Uri`s, it can add
/// the `Host` header based on it. You must add a `Host` header yourself
/// before calling this method.
/// - Since absolute-form `Uri`s are not required, if received, they will
/// be serialized as-is.
///
/// # Example
///
/// ```
/// # use http::header::HOST;
/// # use hyper::client::conn::SendRequest;
/// # use hyper::Body;
/// use hyper::Request;
///
/// # async fn doc(mut tx: SendRequest<Body>) -> hyper::Result<()> {
/// // build a Request
/// let req = Request::builder()
/// .uri("/foo/bar")
/// .header(HOST, "hyper.rs")
/// .body(Body::empty())
/// .unwrap();
///
/// // send it and await a Response
/// let res = tx.send_request(req).await?;
/// // assert the Response
/// assert!(res.status().is_success());
/// # Ok(())
/// # }
/// # fn main() {}
/// ```
pub fn send_request(&mut self, req: Request<B>) -> impl Future<Output = crate::Result<Response<Body>>> {
let sent = self.dispatch.send(req);
async move {
match sent {
Ok(rx) => match rx.await {
Ok(Ok(resp)) => Ok(resp),
Ok(Err(err)) => Err(err),
// this is a definite bug if it happens, but it shouldn't happen!
Err(_canceled) => panic!("dispatch dropped without returning error"),
}
Err(_req) => {
tracing::debug!("connection was not ready");
Err(crate::Error::new_canceled().with("connection was not ready"))
}
}
}
}
/*
pub(super) fn send_request_retryable(
&mut self,
req: Request<B>,
) -> impl Future<Output = Result<Response<Body>, (crate::Error, Option<Request<B>>)>> + Unpin
where
B: Send,
{
match self.dispatch.try_send(req) {
Ok(rx) => {
Either::Left(rx.then(move |res| {
match res {
Ok(Ok(res)) => future::ok(res),
Ok(Err(err)) => future::err(err),
// this is definite bug if it happens, but it shouldn't happen!
Err(_) => panic!("dispatch dropped without returning error"),
}
}))
}
Err(req) => {
tracing::debug!("connection was not ready");
let err = crate::Error::new_canceled().with("connection was not ready");
Either::Right(future::err((err, Some(req))))
}
}
}
*/
}
impl<B> fmt::Debug for SendRequest<B> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SendRequest").finish()
}
}
// ===== impl Connection
impl<T, B> fmt::Debug for Connection<T, B>
where
T: AsyncRead + AsyncWrite + fmt::Debug + Send + 'static,
B: HttpBody + 'static,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Connection").finish()
}
}
impl<T, B> Future for Connection<T, B>
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
B: HttpBody + Send + 'static,
B::Data: Send,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
type Output = crate::Result<()>;
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
match ready!(Pin::new(&mut self.inner.1).poll(cx))? {
proto::Dispatched::Shutdown => Poll::Ready(Ok(())),
#[cfg(feature = "http1")]
proto::Dispatched::Upgrade(_pending) => unreachable!("http2 cannot upgrade"),
}
}
}
// ===== impl Builder
impl Builder {
/// Creates a new connection builder.
#[inline]
pub fn new() -> Builder {
Builder {
exec: Exec::Default,
h2_builder: Default::default(),
}
}
/// Provide an executor to execute background HTTP2 tasks.
pub fn executor<E>(&mut self, exec: E) -> &mut Builder
where
E: Executor<BoxSendFuture> + Send + Sync + 'static,
{
self.exec = Exec::Executor(Arc::new(exec));
self
}
/// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2
/// stream-level flow control.
///
/// Passing `None` will do nothing.
///
/// If not set, hyper will use a default.
///
/// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn http2_initial_stream_window_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
if let Some(sz) = sz.into() {
self.h2_builder.adaptive_window = false;
self.h2_builder.initial_stream_window_size = sz;
}
self
}
/// Sets the max connection-level flow control for HTTP2
///
/// Passing `None` will do nothing.
///
/// If not set, hyper will use a default.
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn http2_initial_connection_window_size(
&mut self,
sz: impl Into<Option<u32>>,
) -> &mut Self {
if let Some(sz) = sz.into() {
self.h2_builder.adaptive_window = false;
self.h2_builder.initial_conn_window_size = sz;
}
self
}
/// Sets whether to use an adaptive flow control.
///
/// Enabling this will override the limits set in
/// `http2_initial_stream_window_size` and
/// `http2_initial_connection_window_size`.
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn http2_adaptive_window(&mut self, enabled: bool) -> &mut Self {
use proto::h2::SPEC_WINDOW_SIZE;
self.h2_builder.adaptive_window = enabled;
if enabled {
self.h2_builder.initial_conn_window_size = SPEC_WINDOW_SIZE;
self.h2_builder.initial_stream_window_size = SPEC_WINDOW_SIZE;
}
self
}
/// Sets the maximum frame size to use for HTTP2.
///
/// Passing `None` will do nothing.
///
/// If not set, hyper will use a default.
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn http2_max_frame_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
if let Some(sz) = sz.into() {
self.h2_builder.max_frame_size = sz;
}
self
}
/// Sets an interval for HTTP2 Ping frames to be sent to keep a
/// connection alive.
///
/// Pass `None` to disable HTTP2 keep-alive.
///
/// Default is currently disabled.
///
/// # Cargo Feature
///
/// Requires the `runtime` cargo feature to be enabled.
#[cfg(feature = "runtime")]
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn http2_keep_alive_interval(
&mut self,
interval: impl Into<Option<Duration>>,
) -> &mut Self {
self.h2_builder.keep_alive_interval = interval.into();
self
}
/// Sets a timeout for receiving an acknowledgement of the keep-alive ping.
///
/// If the ping is not acknowledged within the timeout, the connection will
/// be closed. Does nothing if `http2_keep_alive_interval` is disabled.
///
/// Default is 20 seconds.
///
/// # Cargo Feature
///
/// Requires the `runtime` cargo feature to be enabled.
#[cfg(feature = "runtime")]
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self {
self.h2_builder.keep_alive_timeout = timeout;
self
}
/// Sets whether HTTP2 keep-alive should apply while the connection is idle.
///
/// If disabled, keep-alive pings are only sent while there are open
/// request/responses streams. If enabled, pings are also sent when no
/// streams are active. Does nothing if `http2_keep_alive_interval` is
/// disabled.
///
/// Default is `false`.
///
/// # Cargo Feature
///
/// Requires the `runtime` cargo feature to be enabled.
#[cfg(feature = "runtime")]
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn http2_keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self {
self.h2_builder.keep_alive_while_idle = enabled;
self
}
/// Sets the maximum number of HTTP2 concurrent locally reset streams.
///
/// See the documentation of [`h2::client::Builder::max_concurrent_reset_streams`] for more
/// details.
///
/// The default value is determined by the `h2` crate.
///
/// [`h2::client::Builder::max_concurrent_reset_streams`]: https://docs.rs/h2/client/struct.Builder.html#method.max_concurrent_reset_streams
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn http2_max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self {
self.h2_builder.max_concurrent_reset_streams = Some(max);
self
}
/// Set the maximum write buffer size for each HTTP/2 stream.
///
/// Default is currently 1MB, but may change.
///
/// # Panics
///
/// The value must be no larger than `u32::MAX`.
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn http2_max_send_buf_size(&mut self, max: usize) -> &mut Self {
assert!(max <= std::u32::MAX as usize);
self.h2_builder.max_send_buffer_size = max;
self
}
/// Constructs a connection with the configured options and IO.
/// See [`client::conn`](crate::client::conn) for more.
///
/// Note, if [`Connection`] is not `await`-ed, [`SendRequest`] will
/// do nothing.
pub fn handshake<T, B>(
&self,
io: T,
) -> impl Future<Output = crate::Result<(SendRequest<B>, Connection<T, B>)>>
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
B: HttpBody + 'static,
B::Data: Send,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
let opts = self.clone();
async move {
tracing::trace!("client handshake HTTP/1");
let (tx, rx) = dispatch::channel();
let h2 =
proto::h2::client::handshake(io, rx, &opts.h2_builder, opts.exec)
.await?;
Ok((
SendRequest { dispatch: tx },
Connection { inner: (PhantomData, h2) },
))
}
}
}
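
The `Builder::executor` above expects something implementing `hyper::rt::Executor` for background HTTP/2 tasks. Below is a hedged sketch of a Tokio-backed executor and how it might be wired in; `TokioExecutor` is an illustrative type, not something hyper provides at this point, and the keep-alive call assumes the `runtime` cargo feature.

```rust
use hyper::client::conn::http2;
use hyper::rt::Executor;

// Spawn background HTTP/2 tasks onto the Tokio runtime.
#[derive(Clone, Copy, Debug)]
struct TokioExecutor;

impl<F> Executor<F> for TokioExecutor
where
    F: std::future::Future + Send + 'static,
    F::Output: Send + 'static,
{
    fn execute(&self, fut: F) {
        tokio::spawn(fut);
    }
}

fn builder() -> http2::Builder {
    let mut b = http2::Builder::new();
    b.executor(TokioExecutor)
        .http2_adaptive_window(true)
        // Requires the `runtime` cargo feature.
        .http2_keep_alive_interval(std::time::Duration::from_secs(30));
    b
}
```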

View File

@@ -84,6 +84,11 @@ use crate::rt::Executor;
use crate::upgrade::Upgraded;
use crate::{Body, Request, Response};
#[cfg(feature = "http1")]
pub mod http1;
#[cfg(feature = "http2")]
pub mod http2;
#[cfg(feature = "http1")]
type Http1Dispatcher<T, B> =
proto::dispatch::Dispatcher<proto::dispatch::Client<B>, B, T, proto::h1::ClientTransaction>;
@@ -156,6 +161,8 @@ pub struct Builder {
h1_writev: Option<bool>,
h1_title_case_headers: bool,
h1_preserve_header_case: bool,
#[cfg(feature = "ffi")]
h1_preserve_header_order: bool,
h1_read_buf_exact_size: Option<usize>,
h1_max_buf_size: Option<usize>,
#[cfg(feature = "ffi")]
@@ -492,7 +499,7 @@ where
///
/// This setting is configured by the server peer by sending the
/// [`SETTINGS_ENABLE_CONNECT_PROTOCOL` parameter][2] in a `SETTINGS` frame.
/// This method returns the currently acknowledged value recieved from the
/// This method returns the currently acknowledged value received from the
/// remote.
///
/// [1]: https://datatracker.ietf.org/doc/html/rfc8441#section-4
@@ -558,6 +565,8 @@ impl Builder {
h1_parser_config: Default::default(),
h1_title_case_headers: false,
h1_preserve_header_case: false,
#[cfg(feature = "ffi")]
h1_preserve_header_order: false,
h1_max_buf_size: None,
#[cfg(feature = "ffi")]
h1_headers_raw: false,
@@ -704,6 +713,21 @@ impl Builder {
self
}
/// Set whether to support preserving original header order.
///
/// Currently, this will record the order in which headers are received, and store this
/// ordering in a private extension on the `Response`. It will also look for and use
/// such an extension in any provided `Request`.
///
/// Note that this setting does not affect HTTP/2.
///
/// Default is false.
#[cfg(feature = "ffi")]
pub fn http1_preserve_header_order(&mut self, enabled: bool) -> &mut Builder {
self.h1_preserve_header_order = enabled;
self
}
/// Sets the exact size of the read buffer to *always* use.
///
/// Note that setting this option unsets the `http1_max_buf_size` option.
@@ -951,6 +975,10 @@ impl Builder {
if opts.h1_preserve_header_case {
conn.set_preserve_header_case();
}
#[cfg(feature = "ffi")]
if opts.h1_preserve_header_order {
conn.set_preserve_header_order();
}
if opts.h09_responses {
conn.set_h09_responses();
}

View File

@@ -1,7 +1,6 @@
#[cfg(feature = "http2")]
use std::future::Future;
use futures_util::FutureExt;
use tokio::sync::{mpsc, oneshot};
#[cfg(feature = "http2")]
@@ -169,6 +168,7 @@ impl<T, U> Receiver<T, U> {
#[cfg(feature = "http1")]
pub(crate) fn try_recv(&mut self) -> Option<(T, Callback<T, U>)> {
use futures_util::FutureExt;
match self.inner.recv().now_or_never() {
Some(Some(mut env)) => env.0.take(),
_ => None,

View File

@@ -11,7 +11,7 @@ use crate::body::HttpBody;
use crate::proto::h2::server::H2Stream;
use crate::rt::Executor;
#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))]
use crate::server::conn::spawn_all::{NewSvcTask, Watcher};
use crate::server::server::{new_svc::NewSvcTask, Watcher};
#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))]
use crate::service::HttpService;

View File

@@ -60,7 +60,7 @@ where
// TODO: There should be a way to do following two lines cleaner...
buf.put_slice(&prefix[..copy_len]);
prefix.advance(copy_len);
// Put back whats left
// Put back what's left
if !prefix.is_empty() {
self.pre = Some(prefix);
}

View File

@@ -18,10 +18,7 @@ pub(crate) mod io;
#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))]
mod lazy;
mod never;
#[cfg(any(
feature = "stream",
all(feature = "client", any(feature = "http1", feature = "http2"))
))]
#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))]
pub(crate) mod sync_wrapper;
pub(crate) mod task;
pub(crate) mod watch;

View File

@@ -48,7 +48,7 @@ pub(super) enum Kind {
#[cfg(all(feature = "http1", feature = "server", feature = "runtime"))]
HeaderTimeout,
/// Error while reading a body from connection.
#[cfg(any(feature = "http1", feature = "http2", feature = "stream"))]
#[cfg(any(feature = "http1", feature = "http2"))]
Body,
/// Error while writing a body to connection.
#[cfg(any(feature = "http1", feature = "http2"))]
@@ -294,7 +294,7 @@ impl Error {
Error::new(Kind::ChannelClosed)
}
#[cfg(any(feature = "http1", feature = "http2", feature = "stream"))]
#[cfg(any(feature = "http1", feature = "http2"))]
pub(super) fn new_body<E: Into<Cause>>(cause: E) -> Error {
Error::new(Kind::Body).with(cause)
}
@@ -440,7 +440,7 @@ impl Error {
Kind::Accept => "error accepting connection",
#[cfg(all(feature = "http1", feature = "server", feature = "runtime"))]
Kind::HeaderTimeout => "read header from client timeout",
#[cfg(any(feature = "http1", feature = "http2", feature = "stream"))]
#[cfg(any(feature = "http1", feature = "http2"))]
Kind::Body => "error reading a body from connection",
#[cfg(any(feature = "http1", feature = "http2"))]
Kind::BodyWrite => "error writing a body to connection",

View File

@@ -1,12 +1,21 @@
//! HTTP extensions.
use bytes::Bytes;
#[cfg(any(feature = "http1", feature = "ffi"))]
use http::header::HeaderName;
#[cfg(feature = "http1")]
use http::header::{HeaderName, IntoHeaderName, ValueIter};
use http::header::{IntoHeaderName, ValueIter};
use http::HeaderMap;
#[cfg(feature = "ffi")]
use std::collections::HashMap;
#[cfg(feature = "http2")]
use std::fmt;
#[cfg(any(feature = "http1", feature = "ffi"))]
mod h1_reason_phrase;
#[cfg(any(feature = "http1", feature = "ffi"))]
pub use h1_reason_phrase::ReasonPhrase;
#[cfg(feature = "http2")]
/// Represents the `:protocol` pseudo-header used by
/// the [Extended CONNECT Protocol].
@@ -120,3 +129,99 @@ impl HeaderCaseMap {
self.0.append(name, orig);
}
}
#[cfg(feature = "ffi")]
#[derive(Clone, Debug)]
/// HashMap<HeaderName, number of headers with that name>
pub(crate) struct OriginalHeaderOrder {
/// Stores how many entries a HeaderName maps to. This is used
/// for accounting.
num_entries: HashMap<HeaderName, usize>,
/// Stores the ordering of the headers. ex: `vec[i] = (headerName, idx)`,
/// The vector is ordered such that the ith element
/// represents the ith header that came in off the line.
/// The `HeaderName` and `idx` are then used elsewhere to index into
/// the multi map that stores the header values.
entry_order: Vec<(HeaderName, usize)>,
}
#[cfg(all(feature = "http1", feature = "ffi"))]
impl OriginalHeaderOrder {
pub(crate) fn default() -> Self {
OriginalHeaderOrder {
num_entries: HashMap::new(),
entry_order: Vec::new(),
}
}
pub(crate) fn insert(&mut self, name: HeaderName) {
if !self.num_entries.contains_key(&name) {
let idx = 0;
self.num_entries.insert(name.clone(), 1);
self.entry_order.push((name, idx));
}
// Replacing an already existing element does not
// change ordering, so we only care if it's the first
// header name encountered
}
pub(crate) fn append<N>(&mut self, name: N)
where
N: IntoHeaderName + Into<HeaderName> + Clone,
{
let name: HeaderName = name.into();
let idx;
if self.num_entries.contains_key(&name) {
idx = self.num_entries[&name];
*self.num_entries.get_mut(&name).unwrap() += 1;
} else {
idx = 0;
self.num_entries.insert(name.clone(), 1);
}
self.entry_order.push((name, idx));
}
// No doc test is run here because `RUSTFLAGS='--cfg hyper_unstable_ffi'`
// is needed to compile. Once ffi is stabilized, `no_run` should be removed
// here.
/// This returns an iterator that provides header names and indexes
/// in the original order received.
///
/// # Examples
/// ```no_run
/// use hyper::ext::OriginalHeaderOrder;
/// use hyper::header::{HeaderName, HeaderValue, HeaderMap};
///
/// let mut h_order = OriginalHeaderOrder::default();
/// let mut h_map = HeaderMap::new();
///
/// let name1 = b"Set-CookiE";
/// let value1 = b"a=b";
/// h_map.append(name1, value1);
/// h_order.append(name1);
///
/// let name2 = b"Content-Encoding";
/// let value2 = b"gzip";
/// h_map.append(name2, value2);
/// h_order.append(name2);
///
/// let name3 = b"SET-COOKIE";
/// let value3 = b"c=d";
/// h_map.append(name3, value3);
/// h_order.append(name3);
///
/// let mut iter = h_order.get_in_order();
///
/// let (name, idx) = iter.next().unwrap();
/// assert_eq!(b"a=b", h_map.get_all(name).nth(idx).unwrap());
///
/// let (name, idx) = iter.next().unwrap();
/// assert_eq!(b"gzip", h_map.get_all(name).nth(idx).unwrap());
///
/// let (name, idx) = iter.next().unwrap();
/// assert_eq!(b"c=d", h_map.get_all(name).nth(idx).unwrap());
/// ```
pub(crate) fn get_in_order(&self) -> impl Iterator<Item = &(HeaderName, usize)> {
self.entry_order.iter()
}
}

221
src/ext/h1_reason_phrase.rs Normal file
View File

@@ -0,0 +1,221 @@
use std::convert::TryFrom;
use bytes::Bytes;
/// A reason phrase in an HTTP/1 response.
///
/// # Clients
///
/// For clients, a `ReasonPhrase` will be present in the extensions of the `http::Response` returned
/// for a request if the reason phrase is different from the canonical reason phrase for the
/// response's status code. For example, if a server returns `HTTP/1.1 200 Awesome`, the
/// `ReasonPhrase` will be present and contain `Awesome`, but if a server returns `HTTP/1.1 200 OK`,
/// the response will not contain a `ReasonPhrase`.
///
/// ```no_run
/// # #[cfg(all(feature = "tcp", feature = "client", feature = "http1"))]
/// # async fn fake_fetch() -> hyper::Result<()> {
/// use hyper::{Client, Uri};
/// use hyper::ext::ReasonPhrase;
///
/// let res = Client::new().get(Uri::from_static("http://example.com/non_canonical_reason")).await?;
///
/// // Print out the non-canonical reason phrase, if it has one...
/// if let Some(reason) = res.extensions().get::<ReasonPhrase>() {
/// println!("non-canonical reason: {}", std::str::from_utf8(reason.as_bytes()).unwrap());
/// }
/// # Ok(())
/// # }
/// ```
///
/// # Servers
///
/// When a `ReasonPhrase` is present in the extensions of the `http::Response` written by a server,
/// its contents will be written in place of the canonical reason phrase when responding via HTTP/1.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct ReasonPhrase(Bytes);
impl ReasonPhrase {
/// Gets the reason phrase as bytes.
pub fn as_bytes(&self) -> &[u8] {
&self.0
}
/// Converts a static byte slice to a reason phrase.
pub fn from_static(reason: &'static [u8]) -> Self {
// TODO: this can be made const once MSRV is >= 1.57.0
if find_invalid_byte(reason).is_some() {
panic!("invalid byte in static reason phrase");
}
Self(Bytes::from_static(reason))
}
/// Converts a `Bytes` directly into a `ReasonPhrase` without validating.
///
/// Use with care; invalid bytes in a reason phrase can cause serious security problems if
/// emitted in a response.
pub unsafe fn from_bytes_unchecked(reason: Bytes) -> Self {
Self(reason)
}
}
impl TryFrom<&[u8]> for ReasonPhrase {
type Error = InvalidReasonPhrase;
fn try_from(reason: &[u8]) -> Result<Self, Self::Error> {
if let Some(bad_byte) = find_invalid_byte(reason) {
Err(InvalidReasonPhrase { bad_byte })
} else {
Ok(Self(Bytes::copy_from_slice(reason)))
}
}
}
impl TryFrom<Vec<u8>> for ReasonPhrase {
type Error = InvalidReasonPhrase;
fn try_from(reason: Vec<u8>) -> Result<Self, Self::Error> {
if let Some(bad_byte) = find_invalid_byte(&reason) {
Err(InvalidReasonPhrase { bad_byte })
} else {
Ok(Self(Bytes::from(reason)))
}
}
}
impl TryFrom<String> for ReasonPhrase {
type Error = InvalidReasonPhrase;
fn try_from(reason: String) -> Result<Self, Self::Error> {
if let Some(bad_byte) = find_invalid_byte(reason.as_bytes()) {
Err(InvalidReasonPhrase { bad_byte })
} else {
Ok(Self(Bytes::from(reason)))
}
}
}
impl TryFrom<Bytes> for ReasonPhrase {
type Error = InvalidReasonPhrase;
fn try_from(reason: Bytes) -> Result<Self, Self::Error> {
if let Some(bad_byte) = find_invalid_byte(&reason) {
Err(InvalidReasonPhrase { bad_byte })
} else {
Ok(Self(reason))
}
}
}
impl Into<Bytes> for ReasonPhrase {
fn into(self) -> Bytes {
self.0
}
}
impl AsRef<[u8]> for ReasonPhrase {
fn as_ref(&self) -> &[u8] {
&self.0
}
}
/// Error indicating an invalid byte when constructing a `ReasonPhrase`.
///
/// See [the spec][spec] for details on allowed bytes.
///
/// [spec]: https://httpwg.org/http-core/draft-ietf-httpbis-messaging-latest.html#rfc.section.4.p.7
#[derive(Debug)]
pub struct InvalidReasonPhrase {
bad_byte: u8,
}
impl std::fmt::Display for InvalidReasonPhrase {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "Invalid byte in reason phrase: {}", self.bad_byte)
}
}
impl std::error::Error for InvalidReasonPhrase {}
const fn is_valid_byte(b: u8) -> bool {
// See https://www.rfc-editor.org/rfc/rfc5234.html#appendix-B.1
const fn is_vchar(b: u8) -> bool {
0x21 <= b && b <= 0x7E
}
// See https://httpwg.org/http-core/draft-ietf-httpbis-semantics-latest.html#fields.values
//
// The 0xFF comparison is technically redundant, but it matches the text of the spec more
// clearly and will be optimized away.
#[allow(unused_comparisons)]
const fn is_obs_text(b: u8) -> bool {
0x80 <= b && b <= 0xFF
}
// See https://httpwg.org/http-core/draft-ietf-httpbis-messaging-latest.html#rfc.section.4.p.7
b == b'\t' || b == b' ' || is_vchar(b) || is_obs_text(b)
}
const fn find_invalid_byte(bytes: &[u8]) -> Option<u8> {
let mut i = 0;
while i < bytes.len() {
let b = bytes[i];
if !is_valid_byte(b) {
return Some(b);
}
i += 1;
}
None
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn basic_valid() {
const PHRASE: &'static [u8] = b"OK";
assert_eq!(ReasonPhrase::from_static(PHRASE).as_bytes(), PHRASE);
assert_eq!(ReasonPhrase::try_from(PHRASE).unwrap().as_bytes(), PHRASE);
}
#[test]
fn empty_valid() {
const PHRASE: &'static [u8] = b"";
assert_eq!(ReasonPhrase::from_static(PHRASE).as_bytes(), PHRASE);
assert_eq!(ReasonPhrase::try_from(PHRASE).unwrap().as_bytes(), PHRASE);
}
#[test]
fn obs_text_valid() {
const PHRASE: &'static [u8] = b"hyp\xe9r";
assert_eq!(ReasonPhrase::from_static(PHRASE).as_bytes(), PHRASE);
assert_eq!(ReasonPhrase::try_from(PHRASE).unwrap().as_bytes(), PHRASE);
}
const NEWLINE_PHRASE: &'static [u8] = b"hyp\ner";
#[test]
#[should_panic]
fn newline_invalid_panic() {
ReasonPhrase::from_static(NEWLINE_PHRASE);
}
#[test]
fn newline_invalid_err() {
assert!(ReasonPhrase::try_from(NEWLINE_PHRASE).is_err());
}
const CR_PHRASE: &'static [u8] = b"hyp\rer";
#[test]
#[should_panic]
fn cr_invalid_panic() {
ReasonPhrase::from_static(CR_PHRASE);
}
#[test]
fn cr_invalid_err() {
assert!(ReasonPhrase::try_from(CR_PHRASE).is_err());
}
}
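
A small server-side sketch of the extension described above (assuming the `http1` feature): inserting a `ReasonPhrase` into the response extensions makes hyper emit it in place of the canonical phrase when writing the HTTP/1 status line.

```rust
use hyper::ext::ReasonPhrase;
use hyper::{Body, Response};

fn awesome_response() -> Response<Body> {
    let mut res = Response::new(Body::from("hello"));
    // Written as `HTTP/1.1 200 Awesome` instead of `HTTP/1.1 200 OK`.
    res.extensions_mut()
        .insert(ReasonPhrase::from_static(b"Awesome"));
    res
}
```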

View File

@@ -93,8 +93,7 @@ unsafe impl AsTaskType for hyper_clientconn {
ffi_fn! {
/// Creates a new set of HTTP clientconn options to be used in a handshake.
fn hyper_clientconn_options_new() -> *mut hyper_clientconn_options {
let mut builder = conn::Builder::new();
builder.http1_preserve_header_case(true);
let builder = conn::Builder::new();
Box::into_raw(Box::new(hyper_clientconn_options {
builder,
@@ -103,6 +102,26 @@ ffi_fn! {
} ?= std::ptr::null_mut()
}
ffi_fn! {
/// Set whether or not header case is preserved.
///
/// Pass `0` to allow lowercase normalization (default), `1` to retain original case.
fn hyper_clientconn_options_set_preserve_header_case(opts: *mut hyper_clientconn_options, enabled: c_int) {
let opts = non_null! { &mut *opts ?= () };
opts.builder.http1_preserve_header_case(enabled != 0);
}
}
ffi_fn! {
/// Set whether or not header order is preserved.
///
/// Pass `0` to allow reordering (default), `1` to retain original ordering.
fn hyper_clientconn_options_set_preserve_header_order(opts: *mut hyper_clientconn_options, enabled: c_int) {
let opts = non_null! { &mut *opts ?= () };
opts.builder.http1_preserve_header_order(enabled != 0);
}
}
ffi_fn! {
/// Free a `hyper_clientconn_options *`.
fn hyper_clientconn_options_free(opts: *mut hyper_clientconn_options) {
@@ -160,3 +179,16 @@ ffi_fn! {
hyper_code::HYPERE_OK
}
}
ffi_fn! {
/// Set whether HTTP/1 connections will accept obsolete line folding for header values.
/// Newline codepoints (\r and \n) will be transformed to spaces when parsing.
///
/// Pass `0` to disable, `1` to enable.
///
fn hyper_clientconn_options_http1_allow_multiline_headers(opts: *mut hyper_clientconn_options, enabled: c_int) -> hyper_code {
let opts = non_null! { &mut *opts ?= hyper_code::HYPERE_INVALID_ARG };
opts.builder.http1_allow_obsolete_multiline_headers_in_responses(enabled != 0);
hyper_code::HYPERE_OK
}
}

View File

@@ -6,7 +6,7 @@ use super::body::{hyper_body, hyper_buf};
use super::error::hyper_code;
use super::task::{hyper_task_return_type, AsTaskType};
use super::{UserDataPointer, HYPER_ITER_CONTINUE};
use crate::ext::HeaderCaseMap;
use crate::ext::{HeaderCaseMap, OriginalHeaderOrder, ReasonPhrase};
use crate::header::{HeaderName, HeaderValue};
use crate::{Body, HeaderMap, Method, Request, Response, Uri};
@@ -22,11 +22,9 @@ pub struct hyper_response(pub(super) Response<Body>);
pub struct hyper_headers {
pub(super) headers: HeaderMap,
orig_casing: HeaderCaseMap,
orig_order: OriginalHeaderOrder,
}
#[derive(Debug)]
pub(crate) struct ReasonPhrase(pub(crate) Bytes);
pub(crate) struct RawHeaders(pub(crate) hyper_buf);
pub(crate) struct OnInformational {
@@ -233,6 +231,7 @@ impl hyper_request {
if let Some(headers) = self.0.extensions_mut().remove::<hyper_headers>() {
*self.0.headers_mut() = headers.headers;
self.0.extensions_mut().insert(headers.orig_casing);
self.0.extensions_mut().insert(headers.orig_order);
}
}
}
@@ -348,9 +347,14 @@ impl hyper_response {
.extensions_mut()
.remove::<HeaderCaseMap>()
.unwrap_or_else(HeaderCaseMap::default);
let orig_order = resp
.extensions_mut()
.remove::<OriginalHeaderOrder>()
.unwrap_or_else(OriginalHeaderOrder::default);
resp.extensions_mut().insert(hyper_headers {
headers,
orig_casing,
orig_order,
});
hyper_response(resp)
@@ -358,7 +362,7 @@ impl hyper_response {
fn reason_phrase(&self) -> &[u8] {
if let Some(reason) = self.0.extensions().get::<ReasonPhrase>() {
return &reason.0;
return reason.as_bytes();
}
if let Some(reason) = self.0.status().canonical_reason() {
@@ -404,6 +408,33 @@ ffi_fn! {
// and for each one, try to pair the originally cased name with the value.
//
// TODO: consider adding http::HeaderMap::entries() iterator
let mut ordered_iter = headers.orig_order.get_in_order().peekable();
if ordered_iter.peek().is_some() {
for (name, idx) in ordered_iter {
let (name_ptr, name_len) = if let Some(orig_name) = headers.orig_casing.get_all(name).nth(*idx) {
(orig_name.as_ref().as_ptr(), orig_name.as_ref().len())
} else {
(
name.as_str().as_bytes().as_ptr(),
name.as_str().as_bytes().len(),
)
};
let val_ptr;
let val_len;
if let Some(value) = headers.headers.get_all(name).iter().nth(*idx) {
val_ptr = value.as_bytes().as_ptr();
val_len = value.as_bytes().len();
} else {
// Stop iterating, something has gone wrong.
return;
}
if HYPER_ITER_CONTINUE != func(userdata, name_ptr, name_len, val_ptr, val_len) {
return;
}
}
} else {
for name in headers.headers.keys() {
let mut names = headers.orig_casing.get_all(name);
@@ -427,6 +458,7 @@ ffi_fn! {
}
}
}
}
ffi_fn! {
/// Sets the header with the provided name to the provided value.
@@ -437,7 +469,8 @@ ffi_fn! {
match unsafe { raw_name_value(name, name_len, value, value_len) } {
Ok((name, value, orig_name)) => {
headers.headers.insert(&name, value);
headers.orig_casing.insert(name, orig_name);
headers.orig_casing.insert(name.clone(), orig_name.clone());
headers.orig_order.insert(name);
hyper_code::HYPERE_OK
}
Err(code) => code,
@@ -456,7 +489,8 @@ ffi_fn! {
match unsafe { raw_name_value(name, name_len, value, value_len) } {
Ok((name, value, orig_name)) => {
headers.headers.append(&name, value);
headers.orig_casing.append(name, orig_name);
headers.orig_casing.append(&name, orig_name.clone());
headers.orig_order.append(name);
hyper_code::HYPERE_OK
}
Err(code) => code,
@@ -469,6 +503,7 @@ impl Default for hyper_headers {
Self {
headers: Default::default(),
orig_casing: HeaderCaseMap::default(),
orig_order: OriginalHeaderOrder::default(),
}
}
}
@@ -555,4 +590,68 @@ mod tests {
HYPER_ITER_CONTINUE
}
}
#[cfg(all(feature = "http1", feature = "ffi"))]
#[test]
fn test_headers_foreach_order_preserved() {
let mut headers = hyper_headers::default();
let name1 = b"Set-CookiE";
let value1 = b"a=b";
hyper_headers_add(
&mut headers,
name1.as_ptr(),
name1.len(),
value1.as_ptr(),
value1.len(),
);
let name2 = b"Content-Encoding";
let value2 = b"gzip";
hyper_headers_add(
&mut headers,
name2.as_ptr(),
name2.len(),
value2.as_ptr(),
value2.len(),
);
let name3 = b"SET-COOKIE";
let value3 = b"c=d";
hyper_headers_add(
&mut headers,
name3.as_ptr(),
name3.len(),
value3.as_ptr(),
value3.len(),
);
let mut vec = Vec::<u8>::new();
hyper_headers_foreach(&headers, concat, &mut vec as *mut _ as *mut c_void);
println!("{}", std::str::from_utf8(&vec).unwrap());
assert_eq!(
vec,
b"Set-CookiE: a=b\r\nContent-Encoding: gzip\r\nSET-COOKIE: c=d\r\n"
);
extern "C" fn concat(
vec: *mut c_void,
name: *const u8,
name_len: usize,
value: *const u8,
value_len: usize,
) -> c_int {
unsafe {
let vec = &mut *(vec as *mut Vec<u8>);
let name = std::slice::from_raw_parts(name, name_len);
let value = std::slice::from_raw_parts(value, value_len);
vec.extend(name);
vec.extend(b": ");
vec.extend(value);
vec.extend(b"\r\n");
}
HYPER_ITER_CONTINUE
}
}
}

View File

@@ -32,7 +32,7 @@ pub struct hyper_executor {
/// The executor of all task futures.
///
/// There should never be contention on the mutex, as it is only locked
/// to drive the futures. However, we cannot gaurantee proper usage from
/// to drive the futures. However, we cannot guarantee proper usage from
/// `hyper_executor_poll()`, which in C could potentially be called inside
/// one of the stored futures. The mutex isn't re-entrant, so doing so
/// would result in a deadlock, but that's better than data corruption.

View File

@@ -2,7 +2,7 @@
#![deny(missing_debug_implementations)]
#![cfg_attr(test, deny(rust_2018_idioms))]
#![cfg_attr(all(test, feature = "full"), deny(unreachable_pub))]
#![cfg_attr(test, deny(warnings))]
#![cfg_attr(all(test, feature = "full"), deny(warnings))]
#![cfg_attr(all(test, feature = "nightly"), feature(test))]
#![cfg_attr(docsrs, feature(doc_cfg))]
@@ -52,7 +52,6 @@
//! - `runtime`: Enables convenient integration with `tokio`, providing
//! connectors and acceptors for TCP, and a default executor.
//! - `tcp`: Enables convenient implementations over TCP (using tokio).
//! - `stream`: Provides `futures::Stream` capabilities.
//!
//! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section

View File

@@ -58,6 +58,8 @@ where
#[cfg(all(feature = "server", feature = "runtime"))]
h1_header_read_timeout_running: false,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
title_case_headers: false,
h09_responses: false,
#[cfg(feature = "ffi")]
@@ -111,6 +113,11 @@ where
self.state.preserve_header_case = true;
}
#[cfg(feature = "ffi")]
pub(crate) fn set_preserve_header_order(&mut self) {
self.state.preserve_header_order = true;
}
#[cfg(feature = "client")]
pub(crate) fn set_h09_responses(&mut self) {
self.state.h09_responses = true;
@@ -148,19 +155,15 @@ where
}
pub(crate) fn can_read_head(&self) -> bool {
match self.state.reading {
Reading::Init => {
if !matches!(self.state.reading, Reading::Init) {
return false;
}
if T::should_read_first() {
true
} else {
match self.state.writing {
Writing::Init => false,
_ => true,
}
}
}
_ => false,
return true;
}
!matches!(self.state.writing, Writing::Init)
}
pub(crate) fn can_read_body(&self) -> bool {
@@ -200,6 +203,8 @@ where
#[cfg(all(feature = "server", feature = "runtime"))]
h1_header_read_timeout_running: &mut self.state.h1_header_read_timeout_running,
preserve_header_case: self.state.preserve_header_case,
#[cfg(feature = "ffi")]
preserve_header_order: self.state.preserve_header_order,
h09_responses: self.state.h09_responses,
#[cfg(feature = "ffi")]
on_informational: &mut self.state.on_informational,
@@ -358,10 +363,10 @@ where
}
fn is_mid_message(&self) -> bool {
match (&self.state.reading, &self.state.writing) {
(&Reading::Init, &Writing::Init) => false,
_ => true,
}
!matches!(
(&self.state.reading, &self.state.writing),
(&Reading::Init, &Writing::Init)
)
}
// This will check to make sure the io object read is empty.
@@ -484,11 +489,10 @@ where
}
pub(crate) fn can_write_head(&self) -> bool {
if !T::should_read_first() {
if let Reading::Closed = self.state.reading {
if !T::should_read_first() && matches!(self.state.reading, Reading::Closed) {
return false;
}
}
match self.state.writing {
Writing::Init => self.io.can_headers_buf(),
_ => false,
@@ -582,7 +586,7 @@ where
}
}
// Fix keep-alives when Connection: keep-alive header is not present
// Fix keep-alive when Connection: keep-alive header is not present
fn fix_keep_alive(&mut self, head: &mut MessageHead<T::Outgoing>) {
let outgoing_is_keep_alive = head
.headers
@@ -632,15 +636,15 @@ where
Writing::Body(ref mut encoder) => {
self.io.buffer(encoder.encode(chunk));
if encoder.is_eof() {
if !encoder.is_eof() {
return;
}
if encoder.is_last() {
Writing::Closed
} else {
Writing::KeepAlive
}
} else {
return;
}
}
_ => unreachable!("write_body invalid state: {:?}", self.state.writing),
};
@@ -671,32 +675,31 @@ where
pub(crate) fn end_body(&mut self) -> crate::Result<()> {
debug_assert!(self.can_write_body());
let mut res = Ok(());
let state = match self.state.writing {
Writing::Body(ref mut encoder) => {
let encoder = match self.state.writing {
Writing::Body(ref mut enc) => enc,
_ => return Ok(()),
};
// end of stream, that means we should try to eof
match encoder.end() {
Ok(end) => {
if let Some(end) = end {
self.io.buffer(end);
}
if encoder.is_last() || encoder.is_close_delimited() {
self.state.writing = if encoder.is_last() || encoder.is_close_delimited() {
Writing::Closed
} else {
Writing::KeepAlive
}
}
Err(not_eof) => {
res = Err(crate::Error::new_body_write_aborted().with(not_eof));
Writing::Closed
}
}
}
_ => return Ok(()),
};
self.state.writing = state;
res
Ok(())
}
Err(not_eof) => {
self.state.writing = Writing::Closed;
Err(crate::Error::new_body_write_aborted().with(not_eof))
}
}
}
// When we get a parse error, depending on what side we are, we might be able
@@ -749,10 +752,7 @@ where
// If still in Reading::Body, just give up
match self.state.reading {
Reading::Init | Reading::KeepAlive => {
trace!("body drained");
return;
}
Reading::Init | Reading::KeepAlive => trace!("body drained"),
_ => self.close_read(),
}
}
@@ -824,6 +824,8 @@ struct State {
#[cfg(all(feature = "server", feature = "runtime"))]
h1_header_read_timeout_running: bool,
preserve_header_case: bool,
#[cfg(feature = "ffi")]
preserve_header_order: bool,
title_case_headers: bool,
h09_responses: bool,
/// If set, called with each 1xx informational response received for
@@ -1001,7 +1003,12 @@ impl State {
self.method = None;
self.keep_alive.idle();
if self.is_idle() {
if !self.is_idle() {
self.close();
return;
}
self.reading = Reading::Init;
self.writing = Writing::Init;
@@ -1013,31 +1020,18 @@ impl State {
if !T::should_read_first() {
self.notify_read = true;
}
} else {
self.close();
}
}
fn is_idle(&self) -> bool {
if let KA::Idle = self.keep_alive.status() {
true
} else {
false
}
matches!(self.keep_alive.status(), KA::Idle)
}
fn is_read_closed(&self) -> bool {
match self.reading {
Reading::Closed => true,
_ => false,
}
matches!(self.reading, Reading::Closed)
}
fn is_write_closed(&self) -> bool {
match self.writing {
Writing::Closed => true,
_ => false,
}
matches!(self.writing, Writing::Closed)
}
fn prepare_upgrade(&mut self) -> crate::upgrade::OnUpgrade {

View File

@@ -1,17 +1,17 @@
use std::cmp;
use std::fmt;
#[cfg(all(feature = "server", feature = "runtime"))]
use std::future::Future;
use std::io::{self, IoSlice};
use std::marker::Unpin;
use std::mem::MaybeUninit;
#[cfg(all(feature = "server", feature = "runtime"))]
use std::future::Future;
#[cfg(all(feature = "server", feature = "runtime"))]
use std::time::Duration;
#[cfg(all(feature = "server", feature = "runtime"))]
use tokio::time::Instant;
use bytes::{Buf, BufMut, Bytes, BytesMut};
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
#[cfg(all(feature = "server", feature = "runtime"))]
use tokio::time::Instant;
use tracing::{debug, trace};
use super::{Http1Transaction, ParseContext, ParsedMessage};
@@ -194,6 +194,8 @@ where
#[cfg(all(feature = "server", feature = "runtime"))]
h1_header_read_timeout_running: parse_ctx.h1_header_read_timeout_running,
preserve_header_case: parse_ctx.preserve_header_case,
#[cfg(feature = "ffi")]
preserve_header_order: parse_ctx.preserve_header_order,
h09_responses: parse_ctx.h09_responses,
#[cfg(feature = "ffi")]
on_informational: parse_ctx.on_informational,
@@ -208,9 +210,13 @@ where
{
*parse_ctx.h1_header_read_timeout_running = false;
if let Some(h1_header_read_timeout_fut) = parse_ctx.h1_header_read_timeout_fut {
if let Some(h1_header_read_timeout_fut) =
parse_ctx.h1_header_read_timeout_fut
{
// Reset the timer in order to avoid being woken up when the timeout finishes
h1_header_read_timeout_fut.as_mut().reset(Instant::now() + Duration::from_secs(30 * 24 * 60 * 60));
h1_header_read_timeout_fut
.as_mut()
.reset(Instant::now() + Duration::from_secs(30 * 24 * 60 * 60));
}
}
return Poll::Ready(Ok(msg));
@@ -224,12 +230,14 @@ where
#[cfg(all(feature = "server", feature = "runtime"))]
if *parse_ctx.h1_header_read_timeout_running {
if let Some(h1_header_read_timeout_fut) = parse_ctx.h1_header_read_timeout_fut {
if let Some(h1_header_read_timeout_fut) =
parse_ctx.h1_header_read_timeout_fut
{
if Pin::new(h1_header_read_timeout_fut).poll(cx).is_ready() {
*parse_ctx.h1_header_read_timeout_running = false;
tracing::warn!("read header from client timeout");
return Poll::Ready(Err(crate::Error::new_header_timeout()))
return Poll::Ready(Err(crate::Error::new_header_timeout()));
}
}
}
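The 30-day reset above merely parks the timer so it stops waking the task; the user-facing knob is the header read timeout itself. A hedged sketch of enabling it (assumes the `http1`, `server`, and `runtime` features, and that the public builder method mirrors this internal option name):

use std::time::Duration;

use hyper::server::conn::Http;

// Hypothetical setup: fail a connection whose request head has not fully
// arrived within 30 seconds.
fn configure() -> Http {
    let mut http = Http::new();
    http.http1_header_read_timeout(Duration::from_secs(30));
    http
}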
@@ -727,10 +735,15 @@ mod tests {
cached_headers: &mut None,
req_method: &mut None,
h1_parser_config: Default::default(),
#[cfg(feature = "runtime")]
h1_header_read_timeout: None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
h09_responses: false,
#[cfg(feature = "ffi")]
on_informational: &mut None,
@@ -894,9 +907,7 @@ mod tests {
async fn write_buf_flatten() {
let _ = pretty_env_logger::try_init();
let mock = Mock::new()
.write(b"hello world, it's hyper!")
.build();
let mock = Mock::new().write(b"hello world, it's hyper!").build();
let mut buffered = Buffered::<_, Cursor<Vec<u8>>>::new(mock);
buffered.write_buf.set_strategy(WriteStrategy::Flatten);

View File

@@ -24,7 +24,6 @@ mod encode;
mod io;
mod role;
cfg_client! {
pub(crate) type ClientTransaction = role::Client;
}
@@ -84,6 +83,8 @@ pub(crate) struct ParseContext<'a> {
#[cfg(all(feature = "server", feature = "runtime"))]
h1_header_read_timeout_running: &'a mut bool,
preserve_header_case: bool,
#[cfg(feature = "ffi")]
preserve_header_order: bool,
h09_responses: bool,
#[cfg(feature = "ffi")]
on_informational: &'a mut Option<crate::ffi::OnInformational>,

View File

@@ -1,15 +1,14 @@
use std::fmt::{self, Write};
use std::mem::MaybeUninit;
#[cfg(all(feature = "server", feature = "runtime"))]
use tokio::time::Instant;
#[cfg(any(test, feature = "server", feature = "ffi"))]
use bytes::Bytes;
use bytes::BytesMut;
#[cfg(feature = "server")]
use http::header::ValueIter;
use http::header::{self, Entry, HeaderName, HeaderValue};
use http::{HeaderMap, Method, StatusCode, Version};
#[cfg(all(feature = "server", feature = "runtime"))]
use tokio::time::Instant;
use tracing::{debug, error, trace, trace_span, warn};
use crate::body::DecodedLength;
@@ -17,6 +16,8 @@ use crate::body::DecodedLength;
use crate::common::date;
use crate::error::Parse;
use crate::ext::HeaderCaseMap;
#[cfg(feature = "ffi")]
use crate::ext::OriginalHeaderOrder;
use crate::headers;
use crate::proto::h1::{
Encode, Encoder, Http1Transaction, ParseContext, ParseResult, ParsedMessage,
@@ -78,7 +79,7 @@ where
if !*ctx.h1_header_read_timeout_running {
if let Some(h1_header_read_timeout) = ctx.h1_header_read_timeout {
let deadline = Instant::now() + h1_header_read_timeout;
*ctx.h1_header_read_timeout_running = true;
match ctx.h1_header_read_timeout_fut {
Some(h1_header_read_timeout_fut) => {
debug!("resetting h1 header read timeout timer");
@@ -214,6 +215,13 @@ impl Http1Transaction for Server {
None
};
#[cfg(feature = "ffi")]
let mut header_order = if ctx.preserve_header_order {
Some(OriginalHeaderOrder::default())
} else {
None
};
let mut headers = ctx.cached_headers.take().unwrap_or_else(HeaderMap::new);
headers.reserve(headers_len);
@@ -290,6 +298,11 @@ impl Http1Transaction for Server {
header_case_map.append(&name, slice.slice(header.name.0..header.name.1));
}
#[cfg(feature = "ffi")]
if let Some(ref mut header_order) = header_order {
header_order.append(&name);
}
headers.append(name, value);
}
@@ -304,6 +317,11 @@ impl Http1Transaction for Server {
extensions.insert(header_case_map);
}
#[cfg(feature = "ffi")]
if let Some(header_order) = header_order {
extensions.insert(header_order);
}
*ctx.req_method = Some(subject.0.clone());
Ok(Some(ParsedMessage {
@@ -358,7 +376,13 @@ impl Http1Transaction for Server {
let init_cap = 30 + msg.head.headers.len() * AVERAGE_HEADER_SIZE;
dst.reserve(init_cap);
if msg.head.version == Version::HTTP_11 && msg.head.subject == StatusCode::OK {
let custom_reason_phrase = msg.head.extensions.get::<crate::ext::ReasonPhrase>();
if msg.head.version == Version::HTTP_11
&& msg.head.subject == StatusCode::OK
&& custom_reason_phrase.is_none()
{
extend(dst, b"HTTP/1.1 200 OK\r\n");
} else {
match msg.head.version {
@@ -373,6 +397,10 @@ impl Http1Transaction for Server {
extend(dst, msg.head.subject.as_str().as_bytes());
extend(dst, b" ");
if let Some(reason) = custom_reason_phrase {
extend(dst, reason.as_bytes());
} else {
// a reason MUST be written, as many parsers will expect it.
extend(
dst,
@@ -382,6 +410,8 @@ impl Http1Transaction for Server {
.unwrap_or("<none>")
.as_bytes(),
);
}
extend(dst, b"\r\n");
}
@@ -468,6 +498,10 @@ impl Server {
}
}
fn can_have_implicit_zero_content_length(method: &Option<Method>, status: StatusCode) -> bool {
Server::can_have_content_length(method, status) && method != &Some(Method::HEAD)
}
fn encode_headers_with_lower_case(
msg: Encode<'_, StatusCode>,
dst: &mut Vec<u8>,
@@ -820,7 +854,10 @@ impl Server {
}
}
None | Some(BodyLength::Known(0)) => {
if Server::can_have_content_length(msg.req_method, msg.head.subject) {
if Server::can_have_implicit_zero_content_length(
msg.req_method,
msg.head.subject,
) {
header_name_writer.write_full_header_line(
dst,
"content-length: 0\r\n",
@@ -918,12 +955,9 @@ impl Http1Transaction for Client {
trace!("Response.parse Complete({})", len);
let status = StatusCode::from_u16(res.code.unwrap())?;
#[cfg(not(feature = "ffi"))]
let reason = ();
#[cfg(feature = "ffi")]
let reason = {
let reason = res.reason.unwrap();
// Only save the reason phrase if it isnt the canonical reason
// Only save the reason phrase if it isn't the canonical reason
if Some(reason) != status.canonical_reason() {
Some(Bytes::copy_from_slice(reason.as_bytes()))
} else {
@@ -944,12 +978,7 @@ impl Http1Transaction for Client {
Err(httparse::Error::Version) if ctx.h09_responses => {
trace!("Response.parse accepted HTTP/0.9 response");
#[cfg(not(feature = "ffi"))]
let reason = ();
#[cfg(feature = "ffi")]
let reason = None;
(0, StatusCode::OK, reason, Version::HTTP_09, 0)
(0, StatusCode::OK, None, Version::HTTP_09, 0)
}
Err(e) => return Err(e.into()),
}
@@ -957,7 +986,10 @@ impl Http1Transaction for Client {
let mut slice = buf.split_to(len);
if ctx.h1_parser_config.obsolete_multiline_headers_in_responses_are_allowed() {
if ctx
.h1_parser_config
.obsolete_multiline_headers_in_responses_are_allowed()
{
for header in &headers_indices[..headers_len] {
// SAFETY: array is valid up to `headers_len`
let header = unsafe { &*header.as_ptr() };
@@ -981,6 +1013,13 @@ impl Http1Transaction for Client {
None
};
#[cfg(feature = "ffi")]
let mut header_order = if ctx.preserve_header_order {
Some(OriginalHeaderOrder::default())
} else {
None
};
headers.reserve(headers_len);
for header in &headers_indices[..headers_len] {
// SAFETY: array is valid up to `headers_len`
@@ -1003,6 +1042,11 @@ impl Http1Transaction for Client {
header_case_map.append(&name, slice.slice(header.name.0..header.name.1));
}
#[cfg(feature = "ffi")]
if let Some(ref mut header_order) = header_order {
header_order.append(&name);
}
headers.append(name, value);
}
@@ -1013,11 +1057,16 @@ impl Http1Transaction for Client {
}
#[cfg(feature = "ffi")]
if let Some(reason) = reason {
extensions.insert(crate::ffi::ReasonPhrase(reason));
if let Some(header_order) = header_order {
extensions.insert(header_order);
}
if let Some(reason) = reason {
// Safety: httparse ensures that only valid reason phrase bytes are present in this
// field.
let reason = unsafe { crate::ext::ReasonPhrase::from_bytes_unchecked(reason) };
extensions.insert(reason);
}
#[cfg(not(feature = "ffi"))]
drop(reason);
#[cfg(feature = "ffi")]
if ctx.raw_headers {
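Because the non-canonical phrase now ends up in the response extensions, callers can read it back. A small sketch mirroring the `get_custom_reason_phrase` test later in this diff (the feature gating of `hyper::ext::ReasonPhrase` still applies):

use hyper::ext::ReasonPhrase;
use hyper::{Body, Response};

// Returns the raw reason-phrase bytes only when the peer sent a
// non-canonical phrase, e.g. "HTTP/1.1 200 Alright".
fn custom_reason(res: &Response<Body>) -> Option<&[u8]> {
    res.extensions().get::<ReasonPhrase>().map(|r| r.as_bytes())
}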
@@ -1474,10 +1523,15 @@ mod tests {
cached_headers: &mut None,
req_method: &mut method,
h1_parser_config: Default::default(),
#[cfg(feature = "runtime")]
h1_header_read_timeout: None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
h09_responses: false,
#[cfg(feature = "ffi")]
on_informational: &mut None,
@@ -1504,10 +1558,15 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(crate::Method::GET),
h1_parser_config: Default::default(),
#[cfg(feature = "runtime")]
h1_header_read_timeout: None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
h09_responses: false,
#[cfg(feature = "ffi")]
on_informational: &mut None,
@@ -1529,10 +1588,15 @@ mod tests {
cached_headers: &mut None,
req_method: &mut None,
h1_parser_config: Default::default(),
#[cfg(feature = "runtime")]
h1_header_read_timeout: None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
h09_responses: false,
#[cfg(feature = "ffi")]
on_informational: &mut None,
@@ -1552,10 +1616,15 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(crate::Method::GET),
h1_parser_config: Default::default(),
#[cfg(feature = "runtime")]
h1_header_read_timeout: None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
h09_responses: true,
#[cfg(feature = "ffi")]
on_informational: &mut None,
@@ -1577,10 +1646,15 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(crate::Method::GET),
h1_parser_config: Default::default(),
#[cfg(feature = "runtime")]
h1_header_read_timeout: None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
h09_responses: false,
#[cfg(feature = "ffi")]
on_informational: &mut None,
@@ -1606,10 +1680,15 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(crate::Method::GET),
h1_parser_config,
#[cfg(feature = "runtime")]
h1_header_read_timeout: None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
h09_responses: false,
#[cfg(feature = "ffi")]
on_informational: &mut None,
@@ -1632,10 +1711,15 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(crate::Method::GET),
h1_parser_config: Default::default(),
#[cfg(feature = "runtime")]
h1_header_read_timeout: None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
h09_responses: false,
#[cfg(feature = "ffi")]
on_informational: &mut None,
@@ -1653,10 +1737,15 @@ mod tests {
cached_headers: &mut None,
req_method: &mut None,
h1_parser_config: Default::default(),
#[cfg(feature = "runtime")]
h1_header_read_timeout: None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
preserve_header_case: true,
#[cfg(feature = "ffi")]
preserve_header_order: false,
h09_responses: false,
#[cfg(feature = "ffi")]
on_informational: &mut None,
@@ -1695,10 +1784,15 @@ mod tests {
cached_headers: &mut None,
req_method: &mut None,
h1_parser_config: Default::default(),
#[cfg(feature = "runtime")]
h1_header_read_timeout: None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
h09_responses: false,
#[cfg(feature = "ffi")]
on_informational: &mut None,
@@ -1718,10 +1812,15 @@ mod tests {
cached_headers: &mut None,
req_method: &mut None,
h1_parser_config: Default::default(),
#[cfg(feature = "runtime")]
h1_header_read_timeout: None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
h09_responses: false,
#[cfg(feature = "ffi")]
on_informational: &mut None,
@@ -1950,10 +2049,15 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(Method::GET),
h1_parser_config: Default::default(),
#[cfg(feature = "runtime")]
h1_header_read_timeout: None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
h09_responses: false,
#[cfg(feature = "ffi")]
on_informational: &mut None,
@@ -1973,10 +2077,15 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(m),
h1_parser_config: Default::default(),
#[cfg(feature = "runtime")]
h1_header_read_timeout: None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
h09_responses: false,
#[cfg(feature = "ffi")]
on_informational: &mut None,
@@ -1996,10 +2105,15 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(Method::GET),
h1_parser_config: Default::default(),
#[cfg(feature = "runtime")]
h1_header_read_timeout: None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
h09_responses: false,
#[cfg(feature = "ffi")]
on_informational: &mut None,
@@ -2496,10 +2610,15 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(Method::GET),
h1_parser_config: Default::default(),
#[cfg(feature = "runtime")]
h1_header_read_timeout: None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
h09_responses: false,
#[cfg(feature = "ffi")]
on_informational: &mut None,
@@ -2583,10 +2702,15 @@ mod tests {
cached_headers: &mut headers,
req_method: &mut None,
h1_parser_config: Default::default(),
#[cfg(feature = "runtime")]
h1_header_read_timeout: None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
h09_responses: false,
#[cfg(feature = "ffi")]
on_informational: &mut None,
@@ -2626,10 +2750,15 @@ mod tests {
cached_headers: &mut headers,
req_method: &mut None,
h1_parser_config: Default::default(),
#[cfg(feature = "runtime")]
h1_header_read_timeout: None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
h09_responses: false,
#[cfg(feature = "ffi")]
on_informational: &mut None,

View File

@@ -35,6 +35,8 @@ const DEFAULT_CONN_WINDOW: u32 = 1024 * 1024; // 1mb
const DEFAULT_STREAM_WINDOW: u32 = 1024 * 1024; // 1mb
const DEFAULT_MAX_FRAME_SIZE: u32 = 1024 * 16; // 16kb
const DEFAULT_MAX_SEND_BUF_SIZE: usize = 1024 * 400; // 400kb
// 16 MB "sane default" taken from golang http2
const DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE: u32 = 16 << 20;
#[derive(Clone, Debug)]
pub(crate) struct Config {
@@ -49,6 +51,7 @@ pub(crate) struct Config {
#[cfg(feature = "runtime")]
pub(crate) keep_alive_timeout: Duration,
pub(crate) max_send_buffer_size: usize,
pub(crate) max_header_list_size: u32,
}
impl Default for Config {
@@ -65,6 +68,7 @@ impl Default for Config {
#[cfg(feature = "runtime")]
keep_alive_timeout: Duration::from_secs(20),
max_send_buffer_size: DEFAULT_MAX_SEND_BUF_SIZE,
max_header_list_size: DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE,
}
}
}
@@ -116,6 +120,7 @@ where
.initial_window_size(config.initial_stream_window_size)
.initial_connection_window_size(config.initial_conn_window_size)
.max_frame_size(config.max_frame_size)
.max_header_list_size(config.max_header_list_size)
.max_send_buffer_size(config.max_send_buffer_size);
if let Some(max) = config.max_concurrent_streams {
builder.max_concurrent_streams(max);
@@ -138,7 +143,7 @@ where
#[cfg(feature = "runtime")]
keep_alive_timeout: config.keep_alive_timeout,
// If keep-alive is enabled for servers, always enabled while
// idle, so it can more aggresively close dead connections.
// idle, so it can more aggressively close dead connections.
#[cfg(feature = "runtime")]
keep_alive_while_idle: true,
};
@@ -259,7 +264,7 @@ where
let reason = err.h2_reason();
if reason == Reason::NO_ERROR {
// NO_ERROR is only used for graceful shutdowns...
trace!("interpretting NO_ERROR user error as graceful_shutdown");
trace!("interpreting NO_ERROR user error as graceful_shutdown");
self.conn.graceful_shutdown();
} else {
trace!("abruptly shutting down with {:?}", reason);

View File

@@ -6,11 +6,6 @@
//! connections.
//! - Utilities like `poll_fn` to ease creating a custom `Accept`.
#[cfg(feature = "stream")]
use futures_core::Stream;
#[cfg(feature = "stream")]
use pin_project_lite::pin_project;
use crate::common::{
task::{self, Poll},
Pin,
@@ -74,38 +69,3 @@ where
PollFn(func)
}
/// Adapt a `Stream` of incoming connections into an `Accept`.
///
/// # Optional
///
/// This function requires enabling the `stream` feature in your
/// `Cargo.toml`.
#[cfg(feature = "stream")]
pub fn from_stream<S, IO, E>(stream: S) -> impl Accept<Conn = IO, Error = E>
where
S: Stream<Item = Result<IO, E>>,
{
pin_project! {
struct FromStream<S> {
#[pin]
stream: S,
}
}
impl<S, IO, E> Accept for FromStream<S>
where
S: Stream<Item = Result<IO, E>>,
{
type Conn = IO;
type Error = E;
fn poll_accept(
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<Option<Result<Self::Conn, Self::Error>>> {
self.project().stream.poll_next(cx)
}
}
FromStream { stream }
}
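With the `stream` feature and its `from_stream` adapter removed, a plain tokio listener can still be wrapped by hand with the `poll_fn` helper kept above. A minimal sketch (assumes tokio's `TcpListener` and the `server` feature; none of this is added by the diff itself):

use std::io;

use hyper::server::accept::{self, Accept};
use tokio::net::{TcpListener, TcpStream};

// Adapt a plain tokio listener into an `Accept` without `from_stream`.
async fn bind_incoming(addr: &str) -> io::Result<impl Accept<Conn = TcpStream, Error = io::Error>> {
    let listener = TcpListener::bind(addr).await?;
    Ok(accept::poll_fn(move |cx| {
        listener
            .poll_accept(cx)
            .map(|res| Some(res.map(|(conn, _remote_addr)| conn)))
    }))
}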

View File

@@ -48,8 +48,7 @@
not(all(feature = "http1", feature = "http2"))
))]
use std::marker::PhantomData;
#[cfg(feature = "tcp")]
use std::net::SocketAddr;
#[cfg(all(any(feature = "http1", feature = "http2"), feature = "runtime"))]
use std::time::Duration;
#[cfg(feature = "http2")]
@@ -70,23 +69,18 @@ cfg_feature! {
use tokio::io::{AsyncRead, AsyncWrite};
use tracing::trace;
use super::accept::Accept;
pub use super::server::Connecting;
use crate::body::{Body, HttpBody};
use crate::common::{task, Future, Pin, Poll, Unpin};
#[cfg(not(all(feature = "http1", feature = "http2")))]
use crate::common::Never;
use crate::common::exec::{ConnStreamExec, Exec, NewSvcExec};
use crate::common::exec::{ConnStreamExec, Exec};
use crate::proto;
use crate::service::{HttpService, MakeServiceRef};
use self::spawn_all::NewSvcTask;
use crate::service::HttpService;
pub(super) use self::spawn_all::{NoopWatcher, Watcher};
pub(super) use self::upgrades::UpgradeableConnection;
}
#[cfg(feature = "tcp")]
pub use super::tcp::{AddrIncoming, AddrStream};
/// A lower-level configuration of the HTTP protocol.
///
/// This structure is used to configure options for an HTTP server connection.
@@ -97,7 +91,7 @@ pub use super::tcp::{AddrIncoming, AddrStream};
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
pub struct Http<E = Exec> {
exec: E,
pub(crate) exec: E,
h1_half_close: bool,
h1_keep_alive: bool,
h1_title_case_headers: bool,
@@ -127,51 +121,6 @@ enum ConnectionMode {
Fallback,
}
#[cfg(any(feature = "http1", feature = "http2"))]
pin_project! {
/// A stream mapping incoming IOs to new services.
///
/// Yields `Connecting`s that are futures that should be put on a reactor.
#[must_use = "streams do nothing unless polled"]
#[derive(Debug)]
pub(super) struct Serve<I, S, E = Exec> {
#[pin]
incoming: I,
make_service: S,
protocol: Http<E>,
}
}
#[cfg(any(feature = "http1", feature = "http2"))]
pin_project! {
/// A future building a new `Service` to a `Connection`.
///
/// Wraps the future returned from `MakeService` into one that returns
/// a `Connection`.
#[must_use = "futures do nothing unless polled"]
#[derive(Debug)]
#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
pub struct Connecting<I, F, E = Exec> {
#[pin]
future: F,
io: Option<I>,
protocol: Http<E>,
}
}
#[cfg(any(feature = "http1", feature = "http2"))]
pin_project! {
#[must_use = "futures do nothing unless polled"]
#[derive(Debug)]
pub(super) struct SpawnAll<I, S, E> {
// TODO: re-add `pub(super)` once rustdoc can handle this.
//
// See https://github.com/rust-lang/rust/issues/64705
#[pin]
pub(super) serve: Serve<I, S, E>,
}
}
#[cfg(any(feature = "http1", feature = "http2"))]
pin_project! {
/// A future binding a connection with a Service.
@@ -567,6 +516,16 @@ impl<E> Http<E> {
self
}
/// Sets the max size of received header frames.
///
/// Default is currently ~16MB, but may change.
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn http2_max_header_list_size(&mut self, max: u32) -> &mut Self {
self.h2_builder.max_header_list_size = max;
self
}
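A hedged usage sketch of the new option (hypothetical limit value; assumes the `http2` feature):

use hyper::server::conn::Http;

// Advertise and enforce a 64 KiB limit on a peer's total header list
// instead of the ~16 MB default introduced above.
fn configure_h2() -> Http {
    let mut http = Http::new();
    http.http2_only(true).http2_max_header_list_size(64 * 1024);
    http
}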
/// Set the maximum buffer size for the connection.
///
/// Default is ~400kb.
@@ -719,23 +678,6 @@ impl<E> Http<E> {
fallback: PhantomData,
}
}
pub(super) fn serve<I, IO, IE, S, Bd>(&self, incoming: I, make_service: S) -> Serve<I, S, E>
where
I: Accept<Conn = IO, Error = IE>,
IE: Into<Box<dyn StdError + Send + Sync>>,
IO: AsyncRead + AsyncWrite + Unpin,
S: MakeServiceRef<IO, Body, ResBody = Bd>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
Bd: HttpBody,
E: ConnStreamExec<<S::Service as HttpService<Body>>::Future, Bd>,
{
Serve {
incoming,
make_service,
protocol: self.clone(),
}
}
}
// ===== impl Connection =====
@@ -987,141 +929,6 @@ impl Default for ConnectionMode {
}
}
// ===== impl Serve =====
#[cfg(any(feature = "http1", feature = "http2"))]
impl<I, S, E> Serve<I, S, E> {
/// Get a reference to the incoming stream.
#[inline]
pub(super) fn incoming_ref(&self) -> &I {
&self.incoming
}
/*
/// Get a mutable reference to the incoming stream.
#[inline]
pub fn incoming_mut(&mut self) -> &mut I {
&mut self.incoming
}
*/
/// Spawn all incoming connections onto the executor in `Http`.
pub(super) fn spawn_all(self) -> SpawnAll<I, S, E> {
SpawnAll { serve: self }
}
}
#[cfg(any(feature = "http1", feature = "http2"))]
impl<I, IO, IE, S, B, E> Serve<I, S, E>
where
I: Accept<Conn = IO, Error = IE>,
IO: AsyncRead + AsyncWrite + Unpin,
IE: Into<Box<dyn StdError + Send + Sync>>,
S: MakeServiceRef<IO, Body, ResBody = B>,
B: HttpBody,
E: ConnStreamExec<<S::Service as HttpService<Body>>::Future, B>,
{
fn poll_next_(
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<Option<crate::Result<Connecting<IO, S::Future, E>>>> {
let me = self.project();
match ready!(me.make_service.poll_ready_ref(cx)) {
Ok(()) => (),
Err(e) => {
trace!("make_service closed");
return Poll::Ready(Some(Err(crate::Error::new_user_make_service(e))));
}
}
if let Some(item) = ready!(me.incoming.poll_accept(cx)) {
let io = item.map_err(crate::Error::new_accept)?;
let new_fut = me.make_service.make_service_ref(&io);
Poll::Ready(Some(Ok(Connecting {
future: new_fut,
io: Some(io),
protocol: me.protocol.clone(),
})))
} else {
Poll::Ready(None)
}
}
}
// ===== impl Connecting =====
#[cfg(any(feature = "http1", feature = "http2"))]
impl<I, F, S, FE, E, B> Future for Connecting<I, F, E>
where
I: AsyncRead + AsyncWrite + Unpin,
F: Future<Output = Result<S, FE>>,
S: HttpService<Body, ResBody = B>,
B: HttpBody + 'static,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: ConnStreamExec<S::Future, B>,
{
type Output = Result<Connection<I, S, E>, FE>;
fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
let mut me = self.project();
let service = ready!(me.future.poll(cx))?;
let io = Option::take(&mut me.io).expect("polled after complete");
Poll::Ready(Ok(me.protocol.serve_connection(io, service)))
}
}
// ===== impl SpawnAll =====
#[cfg(all(feature = "tcp", any(feature = "http1", feature = "http2")))]
impl<S, E> SpawnAll<AddrIncoming, S, E> {
pub(super) fn local_addr(&self) -> SocketAddr {
self.serve.incoming.local_addr()
}
}
#[cfg(any(feature = "http1", feature = "http2"))]
impl<I, S, E> SpawnAll<I, S, E> {
pub(super) fn incoming_ref(&self) -> &I {
self.serve.incoming_ref()
}
}
#[cfg(any(feature = "http1", feature = "http2"))]
impl<I, IO, IE, S, B, E> SpawnAll<I, S, E>
where
I: Accept<Conn = IO, Error = IE>,
IE: Into<Box<dyn StdError + Send + Sync>>,
IO: AsyncRead + AsyncWrite + Unpin + Send + 'static,
S: MakeServiceRef<IO, Body, ResBody = B>,
B: HttpBody,
E: ConnStreamExec<<S::Service as HttpService<Body>>::Future, B>,
{
pub(super) fn poll_watch<W>(
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
watcher: &W,
) -> Poll<crate::Result<()>>
where
E: NewSvcExec<IO, S::Future, S::Service, E, W>,
W: Watcher<IO, S::Service, E>,
{
let mut me = self.project();
loop {
if let Some(connecting) = ready!(me.serve.as_mut().poll_next_(cx)?) {
let fut = NewSvcTask::new(connecting, watcher.clone());
me.serve
.as_mut()
.project()
.protocol
.exec
.execute_new_svc(fut);
} else {
return Poll::Ready(Ok(()));
}
}
}
}
// ===== impl ProtoServer =====
#[cfg(any(feature = "http1", feature = "http2"))]
@@ -1151,150 +958,6 @@ where
}
}
#[cfg(any(feature = "http1", feature = "http2"))]
pub(crate) mod spawn_all {
use std::error::Error as StdError;
use tokio::io::{AsyncRead, AsyncWrite};
use tracing::debug;
use super::{Connecting, UpgradeableConnection};
use crate::body::{Body, HttpBody};
use crate::common::exec::ConnStreamExec;
use crate::common::{task, Future, Pin, Poll, Unpin};
use crate::service::HttpService;
use pin_project_lite::pin_project;
// Used by `SpawnAll` to optionally watch a `Connection` future.
//
// The regular `hyper::Server` just uses a `NoopWatcher`, which does
// not need to watch anything, and so returns the `Connection` untouched.
//
// The `Server::with_graceful_shutdown` needs to keep track of all active
// connections, and signal that they start to shutdown when prompted, so
// it has a `GracefulWatcher` implementation to do that.
pub trait Watcher<I, S: HttpService<Body>, E>: Clone {
type Future: Future<Output = crate::Result<()>>;
fn watch(&self, conn: UpgradeableConnection<I, S, E>) -> Self::Future;
}
#[allow(missing_debug_implementations)]
#[derive(Copy, Clone)]
pub struct NoopWatcher;
impl<I, S, E> Watcher<I, S, E> for NoopWatcher
where
I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
S: HttpService<Body>,
E: ConnStreamExec<S::Future, S::ResBody>,
S::ResBody: 'static,
<S::ResBody as HttpBody>::Error: Into<Box<dyn StdError + Send + Sync>>,
{
type Future = UpgradeableConnection<I, S, E>;
fn watch(&self, conn: UpgradeableConnection<I, S, E>) -> Self::Future {
conn
}
}
// This is a `Future<Item=(), Error=()>` spawned to an `Executor` inside
// the `SpawnAll`. By being a nameable type, we can be generic over the
// user's `Service::Future`, and thus an `Executor` can execute it.
//
// Doing this allows for the server to conditionally require `Send` futures,
// depending on the `Executor` configured.
//
// Users cannot import this type, nor the associated `NewSvcExec`. Instead,
// a blanket implementation for `Executor<impl Future>` is sufficient.
pin_project! {
#[allow(missing_debug_implementations)]
pub struct NewSvcTask<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>> {
#[pin]
state: State<I, N, S, E, W>,
}
}
pin_project! {
#[project = StateProj]
pub(super) enum State<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>> {
Connecting {
#[pin]
connecting: Connecting<I, N, E>,
watcher: W,
},
Connected {
#[pin]
future: W::Future,
},
}
}
impl<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>> NewSvcTask<I, N, S, E, W> {
pub(super) fn new(connecting: Connecting<I, N, E>, watcher: W) -> Self {
NewSvcTask {
state: State::Connecting {
connecting,
watcher,
},
}
}
}
impl<I, N, S, NE, B, E, W> Future for NewSvcTask<I, N, S, E, W>
where
I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
N: Future<Output = Result<S, NE>>,
NE: Into<Box<dyn StdError + Send + Sync>>,
S: HttpService<Body, ResBody = B>,
B: HttpBody + 'static,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: ConnStreamExec<S::Future, B>,
W: Watcher<I, S, E>,
{
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
// If it weren't for needing to name this type so the `Send` bounds
// could be projected to the `Serve` executor, this could just be
// an `async fn`, and much safer. Woe is me.
let mut me = self.project();
loop {
let next = {
match me.state.as_mut().project() {
StateProj::Connecting {
connecting,
watcher,
} => {
let res = ready!(connecting.poll(cx));
let conn = match res {
Ok(conn) => conn,
Err(err) => {
let err = crate::Error::new_user_make_service(err);
debug!("connecting error: {}", err);
return Poll::Ready(());
}
};
let future = watcher.watch(conn.with_upgrades());
State::Connected { future }
}
StateProj::Connected { future } => {
return future.poll(cx).map(|res| {
if let Err(err) = res {
debug!("connection error: {}", err);
}
});
}
}
};
me.state.set(next);
}
}
}
}
#[cfg(any(feature = "http1", feature = "http2"))]
mod upgrades {
use super::*;

View File

@@ -91,7 +91,8 @@
//! use std::net::SocketAddr;
//! use hyper::{Body, Request, Response, Server};
//! use hyper::service::{make_service_fn, service_fn};
//! use hyper::server::conn::AddrStream;
//! # #[cfg(feature = "runtime")]
//! use tokio::net::TcpStream;
//!
//! #[derive(Clone)]
//! struct AppContext {
@@ -114,14 +115,14 @@
//! };
//!
//! // A `MakeService` that produces a `Service` to handle each connection.
//! let make_service = make_service_fn(move |conn: &AddrStream| {
//! let make_service = make_service_fn(move |conn: &TcpStream| {
//! // We have to clone the context to share it with each invocation of
//! // `make_service`. If your data doesn't implement `Clone` consider using
//! // an `std::sync::Arc`.
//! let context = context.clone();
//!
//! // You can grab the address of the incoming connection like so.
//! let addr = conn.remote_addr();
//! let addr = conn.peer_addr().unwrap();
//!
//! // Create a `Service` for responding to the request.
//! let service = service_fn(move |req| {
@@ -149,7 +150,6 @@
pub mod accept;
pub mod conn;
mod server;
#[cfg(feature = "tcp")]
mod tcp;
@@ -158,7 +158,15 @@ pub use self::server::Server;
cfg_feature! {
#![any(feature = "http1", feature = "http2")]
pub(crate) mod server;
pub use self::server::Builder;
mod shutdown;
}
cfg_feature! {
#![not(any(feature = "http1", feature = "http2"))]
mod server_stub;
use server_stub as server;
}

View File

@@ -1,33 +1,29 @@
use std::error::Error as StdError;
use std::fmt;
#[cfg(feature = "tcp")]
use std::net::{SocketAddr, TcpListener as StdTcpListener};
#[cfg(any(feature = "tcp", feature = "http1"))]
use std::time::Duration;
#[cfg(all(feature = "tcp", any(feature = "http1", feature = "http2")))]
use super::tcp::AddrIncoming;
use crate::common::exec::Exec;
cfg_feature! {
#![any(feature = "http1", feature = "http2")]
use std::error::Error as StdError;
use pin_project_lite::pin_project;
use tokio::io::{AsyncRead, AsyncWrite};
use tracing::trace;
use super::accept::Accept;
#[cfg(all(feature = "tcp"))]
use super::tcp::AddrIncoming;
use crate::body::{Body, HttpBody};
use crate::common::{task, Future, Pin, Poll, Unpin};
use crate::common::exec::Exec;
use crate::common::exec::{ConnStreamExec, NewSvcExec};
use crate::common::{task, Future, Pin, Poll, Unpin};
// Renamed `Http` as `Http_` for now so that people upgrading don't see an
// error that `hyper::server::Http` is private...
use super::conn::{Http as Http_, NoopWatcher, SpawnAll};
use super::conn::{Connection, Http as Http_, UpgradeableConnection};
use super::shutdown::{Graceful, GracefulWatcher};
use crate::service::{HttpService, MakeServiceRef};
}
#[cfg(any(feature = "http1", feature = "http2"))]
use self::new_svc::NewSvcTask;
pin_project! {
/// A listening HTTP server that accepts connections in both HTTP1 and HTTP2 by default.
///
@@ -37,21 +33,14 @@ pin_project! {
/// `Executor`.
pub struct Server<I, S, E = Exec> {
#[pin]
spawn_all: SpawnAll<I, S, E>,
incoming: I,
make_service: S,
protocol: Http_<E>,
}
}
/// A listening HTTP server that accepts connections in both HTTP1 and HTTP2 by default.
///
/// Needs at least one of the `http1` and `http2` features to be activated to actually be useful.
#[cfg(not(any(feature = "http1", feature = "http2")))]
pub struct Server<I, S, E = Exec> {
_marker: std::marker::PhantomData<(I, S, E)>,
}
/// A builder for a [`Server`](Server).
#[derive(Debug)]
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
pub struct Builder<I, E = Exec> {
incoming: I,
@@ -60,7 +49,6 @@ pub struct Builder<I, E = Exec> {
// ===== impl Server =====
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
impl<I> Server<I, ()> {
/// Starts a [`Builder`](Builder) with the provided incoming stream.
@@ -72,9 +60,11 @@ impl<I> Server<I, ()> {
}
}
cfg_feature! {
#![all(feature = "tcp", any(feature = "http1", feature = "http2"))]
#[cfg(feature = "tcp")]
#[cfg_attr(
docsrs,
doc(cfg(all(feature = "tcp", any(feature = "http1", feature = "http2"))))
)]
impl Server<AddrIncoming, ()> {
/// Binds to the provided address, and returns a [`Builder`](Builder).
///
@@ -99,20 +89,19 @@ cfg_feature! {
AddrIncoming::from_std(listener).map(Server::builder)
}
}
}
cfg_feature! {
#![all(feature = "tcp", any(feature = "http1", feature = "http2"))]
#[cfg(feature = "tcp")]
#[cfg_attr(
docsrs,
doc(cfg(all(feature = "tcp", any(feature = "http1", feature = "http2"))))
)]
impl<S, E> Server<AddrIncoming, S, E> {
/// Returns the local address that this server is bound to.
pub fn local_addr(&self) -> SocketAddr {
self.spawn_all.local_addr()
}
self.incoming.local_addr()
}
}
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
impl<I, IO, IE, S, E, B> Server<I, S, E>
where
@@ -124,7 +113,6 @@ where
B: HttpBody + 'static,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: ConnStreamExec<<S::Service as HttpService<Body>>::Future, B>,
E: NewSvcExec<IO, S::Future, S::Service, E, GracefulWatcher>,
{
/// Prepares a server to handle graceful shutdown when the provided future
/// completes.
@@ -165,12 +153,57 @@ where
pub fn with_graceful_shutdown<F>(self, signal: F) -> Graceful<I, S, F, E>
where
F: Future<Output = ()>,
E: NewSvcExec<IO, S::Future, S::Service, E, GracefulWatcher>,
{
Graceful::new(self.spawn_all, signal)
Graceful::new(self, signal)
}
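A hedged usage sketch of the graceful-shutdown path this method feeds (assumes the `tcp` feature; the oneshot-signal wiring is illustrative, not part of this diff):

use std::convert::Infallible;

use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Response, Server};

async fn run() {
    // Shutdown is signalled by firing (or dropping) this oneshot sender.
    let (tx, rx) = tokio::sync::oneshot::channel::<()>();

    let make_svc = make_service_fn(|_conn| async {
        Ok::<_, Infallible>(service_fn(|_req| async {
            Ok::<_, Infallible>(Response::new(Body::from("Hello")))
        }))
    });

    let server = Server::bind(&([127, 0, 0, 1], 3000).into())
        .serve(make_svc)
        // The returned `Graceful` future finishes draining connections
        // after the signal future resolves.
        .with_graceful_shutdown(async { rx.await.ok(); });

    // In a real server, keep `tx` around and call `tx.send(())` to shut down.
    drop(tx);

    if let Err(e) = server.await {
        eprintln!("server error: {}", e);
    }
}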
fn poll_next_(
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<Option<crate::Result<Connecting<IO, S::Future, E>>>> {
let me = self.project();
match ready!(me.make_service.poll_ready_ref(cx)) {
Ok(()) => (),
Err(e) => {
trace!("make_service closed");
return Poll::Ready(Some(Err(crate::Error::new_user_make_service(e))));
}
}
if let Some(item) = ready!(me.incoming.poll_accept(cx)) {
let io = item.map_err(crate::Error::new_accept)?;
let new_fut = me.make_service.make_service_ref(&io);
Poll::Ready(Some(Ok(Connecting {
future: new_fut,
io: Some(io),
protocol: me.protocol.clone(),
})))
} else {
Poll::Ready(None)
}
}
pub(super) fn poll_watch<W>(
mut self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
watcher: &W,
) -> Poll<crate::Result<()>>
where
E: NewSvcExec<IO, S::Future, S::Service, E, W>,
W: Watcher<IO, S::Service, E>,
{
loop {
if let Some(connecting) = ready!(self.as_mut().poll_next_(cx)?) {
let fut = NewSvcTask::new(connecting, watcher.clone());
self.as_mut().project().protocol.exec.execute_new_svc(fut);
} else {
return Poll::Ready(Ok(()));
}
}
}
}
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
impl<I, IO, IE, S, B, E> Future for Server<I, S, E>
where
@@ -187,22 +220,20 @@ where
type Output = crate::Result<()>;
fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
self.project().spawn_all.poll_watch(cx, &NoopWatcher)
self.poll_watch(cx, &NoopWatcher)
}
}
impl<I: fmt::Debug, S: fmt::Debug> fmt::Debug for Server<I, S> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut st = f.debug_struct("Server");
#[cfg(any(feature = "http1", feature = "http2"))]
st.field("listener", &self.spawn_all.incoming_ref());
st.field("listener", &self.incoming);
st.finish()
}
}
// ===== impl Builder =====
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
impl<I, E> Builder<I, E> {
/// Start a new builder, wrapping an incoming stream and low-level options.
@@ -502,7 +533,7 @@ impl<I, E> Builder<I, E> {
/// }
/// # }
/// ```
pub fn serve<S, B>(self, new_service: S) -> Server<I, S, E>
pub fn serve<S, B>(self, make_service: S) -> Server<I, S, E>
where
I: Accept,
I::Error: Into<Box<dyn StdError + Send + Sync>>,
@@ -514,13 +545,19 @@ impl<I, E> Builder<I, E> {
E: NewSvcExec<I::Conn, S::Future, S::Service, E, NoopWatcher>,
E: ConnStreamExec<<S::Service as HttpService<Body>>::Future, B>,
{
let serve = self.protocol.serve(self.incoming, new_service);
let spawn_all = serve.spawn_all();
Server { spawn_all }
Server {
incoming: self.incoming,
make_service,
protocol: self.protocol.clone(),
}
}
}
#[cfg(all(feature = "tcp", any(feature = "http1", feature = "http2")))]
#[cfg(feature = "tcp")]
#[cfg_attr(
docsrs,
doc(cfg(all(feature = "tcp", any(feature = "http1", feature = "http2"))))
)]
impl<E> Builder<AddrIncoming, E> {
/// Set whether TCP keepalive messages are enabled on accepted connections.
///
@@ -558,3 +595,182 @@ impl<E> Builder<AddrIncoming, E> {
self
}
}
// Used by `Server` to optionally watch a `Connection` future.
//
// The regular `hyper::Server` just uses a `NoopWatcher`, which does
// not need to watch anything, and so returns the `Connection` untouched.
//
// The `Server::with_graceful_shutdown` needs to keep track of all active
// connections, and signal that they start to shutdown when prompted, so
// it has a `GracefulWatcher` implementation to do that.
pub trait Watcher<I, S: HttpService<Body>, E>: Clone {
type Future: Future<Output = crate::Result<()>>;
fn watch(&self, conn: UpgradeableConnection<I, S, E>) -> Self::Future;
}
#[allow(missing_debug_implementations)]
#[derive(Copy, Clone)]
pub struct NoopWatcher;
impl<I, S, E> Watcher<I, S, E> for NoopWatcher
where
I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
S: HttpService<Body>,
E: ConnStreamExec<S::Future, S::ResBody>,
S::ResBody: 'static,
<S::ResBody as HttpBody>::Error: Into<Box<dyn StdError + Send + Sync>>,
{
type Future = UpgradeableConnection<I, S, E>;
fn watch(&self, conn: UpgradeableConnection<I, S, E>) -> Self::Future {
conn
}
}
// used by exec.rs
pub(crate) mod new_svc {
use std::error::Error as StdError;
use tokio::io::{AsyncRead, AsyncWrite};
use tracing::debug;
use super::{Connecting, Watcher};
use crate::body::{Body, HttpBody};
use crate::common::exec::ConnStreamExec;
use crate::common::{task, Future, Pin, Poll, Unpin};
use crate::service::HttpService;
use pin_project_lite::pin_project;
// This is a `Future<Item=(), Error=()>` spawned to an `Executor` inside
// the `Server`. By being a nameable type, we can be generic over the
// user's `Service::Future`, and thus an `Executor` can execute it.
//
// Doing this allows for the server to conditionally require `Send` futures,
// depending on the `Executor` configured.
//
// Users cannot import this type, nor the associated `NewSvcExec`. Instead,
// a blanket implementation for `Executor<impl Future>` is sufficient.
pin_project! {
#[allow(missing_debug_implementations)]
pub struct NewSvcTask<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>> {
#[pin]
state: State<I, N, S, E, W>,
}
}
pin_project! {
#[project = StateProj]
pub(super) enum State<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>> {
Connecting {
#[pin]
connecting: Connecting<I, N, E>,
watcher: W,
},
Connected {
#[pin]
future: W::Future,
},
}
}
impl<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>> NewSvcTask<I, N, S, E, W> {
pub(super) fn new(connecting: Connecting<I, N, E>, watcher: W) -> Self {
NewSvcTask {
state: State::Connecting {
connecting,
watcher,
},
}
}
}
impl<I, N, S, NE, B, E, W> Future for NewSvcTask<I, N, S, E, W>
where
I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
N: Future<Output = Result<S, NE>>,
NE: Into<Box<dyn StdError + Send + Sync>>,
S: HttpService<Body, ResBody = B>,
B: HttpBody + 'static,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: ConnStreamExec<S::Future, B>,
W: Watcher<I, S, E>,
{
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
// If it weren't for needing to name this type so the `Send` bounds
// could be projected to the `Serve` executor, this could just be
// an `async fn`, and much safer. Woe is me.
let mut me = self.project();
loop {
let next = {
match me.state.as_mut().project() {
StateProj::Connecting {
connecting,
watcher,
} => {
let res = ready!(connecting.poll(cx));
let conn = match res {
Ok(conn) => conn,
Err(err) => {
let err = crate::Error::new_user_make_service(err);
debug!("connecting error: {}", err);
return Poll::Ready(());
}
};
let future = watcher.watch(conn.with_upgrades());
State::Connected { future }
}
StateProj::Connected { future } => {
return future.poll(cx).map(|res| {
if let Err(err) = res {
debug!("connection error: {}", err);
}
});
}
}
};
me.state.set(next);
}
}
}
}
pin_project! {
/// A future building a new `Service` to a `Connection`.
///
/// Wraps the future returned from `MakeService` into one that returns
/// a `Connection`.
#[must_use = "futures do nothing unless polled"]
#[derive(Debug)]
#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
pub struct Connecting<I, F, E = Exec> {
#[pin]
future: F,
io: Option<I>,
protocol: Http_<E>,
}
}
impl<I, F, S, FE, E, B> Future for Connecting<I, F, E>
where
I: AsyncRead + AsyncWrite + Unpin,
F: Future<Output = Result<S, FE>>,
S: HttpService<Body, ResBody = B>,
B: HttpBody + 'static,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: ConnStreamExec<S::Future, B>,
{
type Output = Result<Connection<I, S, E>, FE>;
fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
let mut me = self.project();
let service = ready!(me.future.poll(cx))?;
let io = Option::take(&mut me.io).expect("polled after complete");
Poll::Ready(Ok(me.protocol.serve_connection(io, service)))
}
}

src/server/server_stub.rs (new file, 16 lines)
View File

@@ -0,0 +1,16 @@
use std::fmt;
use crate::common::exec::Exec;
/// A listening HTTP server that accepts connections in both HTTP1 and HTTP2 by default.
///
/// Needs at least one of the `http1` and `http2` features to be activated to actually be useful.
pub struct Server<I, S, E = Exec> {
_marker: std::marker::PhantomData<(I, S, E)>,
}
impl<I: fmt::Debug, S: fmt::Debug> fmt::Debug for Server<I, S> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Server").finish()
}
}

View File

@@ -5,7 +5,8 @@ use tokio::io::{AsyncRead, AsyncWrite};
use tracing::debug;
use super::accept::Accept;
use super::conn::{SpawnAll, UpgradeableConnection, Watcher};
use super::conn::UpgradeableConnection;
use super::server::{Server, Watcher};
use crate::body::{Body, HttpBody};
use crate::common::drain::{self, Draining, Signal, Watch, Watching};
use crate::common::exec::{ConnStreamExec, NewSvcExec};
@@ -26,7 +27,7 @@ pin_project! {
Running {
drain: Option<(Signal, Watch)>,
#[pin]
spawn_all: SpawnAll<I, S, E>,
server: Server<I, S, E>,
#[pin]
signal: F,
},
@@ -35,12 +36,12 @@ pin_project! {
}
impl<I, S, F, E> Graceful<I, S, F, E> {
pub(super) fn new(spawn_all: SpawnAll<I, S, E>, signal: F) -> Self {
pub(super) fn new(server: Server<I, S, E>, signal: F) -> Self {
let drain = Some(drain::channel());
Graceful {
state: State::Running {
drain,
spawn_all,
server,
signal,
},
}
@@ -69,7 +70,7 @@ where
match me.state.as_mut().project() {
StateProj::Running {
drain,
spawn_all,
server,
signal,
} => match signal.poll(cx) {
Poll::Ready(()) => {
@@ -81,7 +82,7 @@ where
}
Poll::Pending => {
let watch = drain.as_ref().expect("drain channel").1.clone();
return spawn_all.poll_watch(cx, &GracefulWatcher(watch));
return server.poll_watch(cx, &GracefulWatcher(watch));
}
},
StateProj::Draining { ref mut draining } => {

View File

@@ -3,14 +3,12 @@ use std::io;
use std::net::{SocketAddr, TcpListener as StdTcpListener};
use std::time::Duration;
use tokio::net::TcpListener;
use tokio::net::{TcpListener, TcpStream};
use tokio::time::Sleep;
use tracing::{debug, error, trace};
use crate::common::{task, Future, Pin, Poll};
#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
pub use self::addr_stream::AddrStream;
use super::accept::Accept;
/// A stream of connections from binding to an address.
@@ -98,7 +96,7 @@ impl AddrIncoming {
self.sleep_on_errors = val;
}
fn poll_next_(&mut self, cx: &mut task::Context<'_>) -> Poll<io::Result<AddrStream>> {
fn poll_next_(&mut self, cx: &mut task::Context<'_>) -> Poll<io::Result<TcpStream>> {
// Check if a previous timeout is active that was set by IO errors.
if let Some(ref mut to) = self.timeout {
ready!(Pin::new(to).poll(cx));
@@ -107,7 +105,7 @@ impl AddrIncoming {
loop {
match ready!(self.listener.poll_accept(cx)) {
Ok((socket, addr)) => {
Ok((socket, _)) => {
if let Some(dur) = self.tcp_keepalive_timeout {
let socket = socket2::SockRef::from(&socket);
let conf = socket2::TcpKeepalive::new().with_time(dur);
@@ -118,7 +116,7 @@ impl AddrIncoming {
if let Err(e) = socket.set_nodelay(self.tcp_nodelay) {
trace!("error trying to set TCP nodelay: {}", e);
}
return Poll::Ready(Ok(AddrStream::new(socket, addr)));
return Poll::Ready(Ok(socket));
}
Err(e) => {
// Connection errors can be ignored directly, continue by
@@ -154,7 +152,7 @@ impl AddrIncoming {
}
impl Accept for AddrIncoming {
type Conn = AddrStream;
type Conn = TcpStream;
type Error = io::Error;
fn poll_accept(
@@ -174,9 +172,12 @@ impl Accept for AddrIncoming {
/// The timeout is useful to handle resource exhaustion errors like ENFILE
/// and EMFILE. Otherwise, we could enter a tight loop.
fn is_connection_error(e: &io::Error) -> bool {
matches!(e.kind(), io::ErrorKind::ConnectionRefused
matches!(
e.kind(),
io::ErrorKind::ConnectionRefused
| io::ErrorKind::ConnectionAborted
| io::ErrorKind::ConnectionReset)
| io::ErrorKind::ConnectionReset
)
}
impl fmt::Debug for AddrIncoming {
@@ -189,114 +190,3 @@ impl fmt::Debug for AddrIncoming {
.finish()
}
}
mod addr_stream {
use std::io;
use std::net::SocketAddr;
#[cfg(unix)]
use std::os::unix::io::{AsRawFd, RawFd};
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::net::TcpStream;
use crate::common::{task, Pin, Poll};
pin_project_lite::pin_project! {
/// A transport yielded by `AddrIncoming`.
#[derive(Debug)]
pub struct AddrStream {
#[pin]
inner: TcpStream,
pub(super) remote_addr: SocketAddr,
}
}
impl AddrStream {
pub(super) fn new(tcp: TcpStream, addr: SocketAddr) -> AddrStream {
AddrStream {
inner: tcp,
remote_addr: addr,
}
}
/// Returns the remote (peer) address of this connection.
#[inline]
pub fn remote_addr(&self) -> SocketAddr {
self.remote_addr
}
/// Consumes the AddrStream and returns the underlying IO object
#[inline]
pub fn into_inner(self) -> TcpStream {
self.inner
}
/// Attempt to receive data on the socket, without removing that data
/// from the queue, registering the current task for wakeup if data is
/// not yet available.
pub fn poll_peek(
&mut self,
cx: &mut task::Context<'_>,
buf: &mut tokio::io::ReadBuf<'_>,
) -> Poll<io::Result<usize>> {
self.inner.poll_peek(cx, buf)
}
}
impl AsyncRead for AddrStream {
#[inline]
fn poll_read(
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
self.project().inner.poll_read(cx, buf)
}
}
impl AsyncWrite for AddrStream {
#[inline]
fn poll_write(
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
self.project().inner.poll_write(cx, buf)
}
#[inline]
fn poll_write_vectored(
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
bufs: &[io::IoSlice<'_>],
) -> Poll<io::Result<usize>> {
self.project().inner.poll_write_vectored(cx, bufs)
}
#[inline]
fn poll_flush(self: Pin<&mut Self>, _cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
// TCP flush is a noop
Poll::Ready(Ok(()))
}
#[inline]
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
self.project().inner.poll_shutdown(cx)
}
#[inline]
fn is_write_vectored(&self) -> bool {
// Note that since `self.inner` is a `TcpStream`, this could
// *probably* be hard-coded to return `true`...but it seems more
// correct to ask it anyway (maybe we're on some platform without
// scatter-gather IO?)
self.inner.is_write_vectored()
}
}
#[cfg(unix)]
impl AsRawFd for AddrStream {
fn as_raw_fd(&self) -> RawFd {
self.inner.as_raw_fd()
}
}
}

View File

@@ -108,13 +108,13 @@ where
/// # async fn run() {
/// use std::convert::Infallible;
/// use hyper::{Body, Request, Response, Server};
/// use hyper::server::conn::AddrStream;
/// use tokio::net::TcpStream;
/// use hyper::service::{make_service_fn, service_fn};
///
/// let addr = ([127, 0, 0, 1], 3000).into();
///
/// let make_svc = make_service_fn(|socket: &AddrStream| {
/// let remote_addr = socket.remote_addr();
/// let make_svc = make_service_fn(|socket: &TcpStream| {
/// let remote_addr = socket.peer_addr().unwrap();
/// async move {
/// Ok::<_, Infallible>(service_fn(move |_: Request<Body>| async move {
/// Ok::<_, Infallible>(

View File

@@ -4,6 +4,7 @@
#[macro_use]
extern crate matches;
use std::convert::Infallible;
use std::io::{Read, Write};
use std::net::{SocketAddr, TcpListener};
use std::pin::Pin;
@@ -11,9 +12,11 @@ use std::task::{Context, Poll};
use std::thread;
use std::time::Duration;
use http_body_util::{BodyExt, StreamBody};
use hyper::body::to_bytes as concat;
use hyper::{Body, Client, Method, Request, StatusCode};
use bytes::Bytes;
use futures_channel::oneshot;
use futures_core::{Future, Stream, TryFuture};
use futures_util::future::{self, FutureExt, TryFutureExt};
@@ -154,7 +157,7 @@ macro_rules! test {
.build(connector);
#[allow(unused_assignments, unused_mut)]
let mut body = Body::empty();
let mut body = BodyExt::boxed(http_body_util::Empty::<bytes::Bytes>::new());
let mut req_builder = Request::builder();
$(
test!(@client_request; req_builder, body, addr, $c_req_prop: $c_req_val);
@@ -232,7 +235,11 @@ macro_rules! __client_req_prop {
}};
($req_builder:ident, $body:ident, $addr:ident, body: $body_e:expr) => {{
$body = $body_e.into();
$body = BodyExt::boxed(http_body_util::Full::from($body_e));
}};
($req_builder:ident, $body:ident, $addr:ident, body_stream: $body_e:expr) => {{
$body = BodyExt::boxed(StreamBody::new($body_e));
}};
}
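Outside the macro, the same streaming-body construction reads roughly as follows (a sketch against the in-development `http-body-util` API used in this hunk; names may differ in released versions):

use std::convert::Infallible;

use bytes::Bytes;
use futures_util::stream;
use http_body_util::{BodyExt, StreamBody};

// A one-chunk streaming body: the total length is not known up front, so a
// request must either carry an explicit Content-Length (as some cases below
// do) or be sent with chunked transfer-encoding.
async fn one_chunk_body_example() {
    let body = BodyExt::boxed(StreamBody::new(stream::once(async {
        Ok::<_, Infallible>(Bytes::from("hello"))
    })));
    let _ = body;
}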
@@ -477,7 +484,11 @@ test! {
headers: {
"Content-Length" => "5",
},
body: (Body::wrap_stream(Body::from("hello"))),
// use a "stream" (where Body doesn't know length) with a
// content-length header
body_stream: (futures_util::stream::once(async {
Ok::<_, Infallible>(Bytes::from("hello"))
})),
},
response:
status: OK,
@@ -500,12 +511,14 @@ test! {
request: {
method: GET,
url: "http://{addr}/",
// wrap_steam means we don't know the content-length,
// stream means we don't know the content-length,
// but we're wrapping a non-empty stream.
//
// But since the headers cannot tell us, and the method typically
// doesn't have a body, the body must be ignored.
body: (Body::wrap_stream(Body::from("hello"))),
body_stream: (futures_util::stream::once(async {
Ok::<_, Infallible>(Bytes::from("hello"))
})),
},
response:
status: OK,
@@ -532,11 +545,13 @@ test! {
"transfer-encoding" => "chunked",
},
version: HTTP_10,
// wrap_steam means we don't know the content-length,
// stream means we don't know the content-length,
// but we're wrapping a non-empty stream.
//
// But since the headers cannot tell us, the body must be ignored.
body: (Body::wrap_stream(Body::from("hello"))),
body_stream: (futures_util::stream::once(async {
Ok::<_, Infallible>(Bytes::from("hello"))
})),
},
response:
status: OK,
@@ -621,7 +636,10 @@ test! {
request: {
method: POST,
url: "http://{addr}/chunks",
body: (Body::wrap_stream(Body::from("foo bar baz"))),
// use a stream to "hide" that the full amount is known
body_stream: (futures_util::stream::once(async {
Ok::<_, Infallible>(Bytes::from("foo bar baz"))
})),
},
response:
status: OK,
@@ -1704,15 +1722,15 @@ mod dispatch_impl {
let delayed_body = rx1
.then(|_| tokio::time::sleep(Duration::from_millis(200)))
.map(|_| Ok::<_, ()>("hello a"))
.map_err(|_| -> hyper::Error { panic!("rx1") })
.map(|_| Ok::<_, ()>(Bytes::from("hello a")))
.map_err(|_| -> std::convert::Infallible { panic!("rx1") })
.into_stream();
let rx = rx2.expect("thread panicked");
let req = Request::builder()
.method("POST")
.uri(&*format!("http://{}/a", addr))
.body(Body::wrap_stream(delayed_body))
.body(BodyExt::boxed(StreamBody::new(delayed_body)))
.unwrap();
let client2 = client.clone();
@@ -1724,7 +1742,7 @@ mod dispatch_impl {
let rx = rx3.expect("thread panicked");
let req = Request::builder()
.uri(&*format!("http://{}/b", addr))
.body(Body::empty())
.body(BodyExt::boxed(http_body_util::Empty::new()))
.unwrap();
future::join(client2.request(req), rx).map(|r| r.0)
});
@@ -2160,11 +2178,11 @@ mod conn {
use bytes::Buf;
use futures_channel::oneshot;
use futures_util::future::{self, poll_fn, FutureExt, TryFutureExt};
use futures_util::StreamExt;
use hyper::upgrade::OnUpgrade;
use tokio::io::{AsyncRead, AsyncReadExt as _, AsyncWrite, AsyncWriteExt as _, ReadBuf};
use tokio::net::{TcpListener as TkTcpListener, TcpStream};
use hyper::body::HttpBody;
use hyper::client::conn;
use hyper::{self, Body, Method, Request, Response, StatusCode};
@@ -2208,7 +2226,7 @@ mod conn {
.unwrap();
let mut res = client.send_request(req).await.expect("send_request");
assert_eq!(res.status(), hyper::StatusCode::OK);
assert!(res.body_mut().next().await.is_none());
assert!(res.body_mut().data().await.is_none());
};
future::join(server, client).await;
@@ -2265,7 +2283,63 @@ mod conn {
res.headers().get("line-folded-header").unwrap(),
"hello world"
);
assert!(res.body_mut().next().await.is_none());
assert!(res.body_mut().data().await.is_none());
};
future::join(server, client).await;
}
#[tokio::test]
async fn get_custom_reason_phrase() {
let _ = ::pretty_env_logger::try_init();
let listener = TkTcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0)))
.await
.unwrap();
let addr = listener.local_addr().unwrap();
let server = async move {
let mut sock = listener.accept().await.unwrap().0;
let mut buf = [0; 4096];
let n = sock.read(&mut buf).await.expect("read 1");
// Notably:
// - Just a path, since just a path was set
// - No host, since no host was set
let expected = "GET /a HTTP/1.1\r\n\r\n";
assert_eq!(s(&buf[..n]), expected);
sock.write_all(b"HTTP/1.1 200 Alright\r\nContent-Length: 0\r\n\r\n")
.await
.unwrap();
};
let client = async move {
let tcp = tcp_connect(&addr).await.expect("connect");
let (mut client, conn) = conn::handshake(tcp).await.expect("handshake");
tokio::task::spawn(async move {
conn.await.expect("http conn");
});
let req = Request::builder()
.uri("/a")
.body(Default::default())
.unwrap();
let mut res = client.send_request(req).await.expect("send_request");
assert_eq!(res.status(), hyper::StatusCode::OK);
assert_eq!(
res.extensions()
.get::<hyper::ext::ReasonPhrase>()
.expect("custom reason phrase is present")
.as_bytes(),
&b"Alright"[..]
);
assert_eq!(res.headers().len(), 1);
assert_eq!(
res.headers().get(http::header::CONTENT_LENGTH).unwrap(),
"0"
);
assert!(res.body_mut().data().await.is_none());
};
future::join(server, client).await;

View File

@@ -1,6 +1,7 @@
#![deny(warnings)]
#![deny(rust_2018_idioms)]
use std::convert::TryInto;
use std::future::Future;
use std::io::{self, Read, Write};
use std::net::TcpListener as StdTcpListener;
@@ -16,11 +17,10 @@ use std::time::Duration;
use bytes::Bytes;
use futures_channel::oneshot;
use futures_util::future::{self, Either, FutureExt, TryFutureExt};
#[cfg(feature = "stream")]
use futures_util::stream::StreamExt as _;
use h2::client::SendRequest;
use h2::{RecvStream, SendStream};
use http::header::{HeaderName, HeaderValue};
use http_body_util::{combinators::BoxBody, BodyExt, StreamBody};
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, ReadBuf};
use tokio::net::{TcpListener, TcpStream as TkTcpStream};
@@ -109,8 +109,7 @@ mod response_body_lengths {
b
}
Bd::Unknown(b) => {
let (mut tx, body) = hyper::Body::channel();
tx.try_send_data(b.into()).expect("try_send_data");
let body = futures_util::stream::once(async move { Ok(b.into()) });
reply.body_stream(body);
b
}
@@ -383,6 +382,33 @@ mod response_body_lengths {
}
}
#[test]
fn get_response_custom_reason_phrase() {
let _ = pretty_env_logger::try_init();
let server = serve();
server.reply().reason_phrase("Cool");
let mut req = connect(server.addr());
req.write_all(
b"\
GET / HTTP/1.1\r\n\
Host: example.domain\r\n\
Connection: close\r\n\
\r\n\
",
)
.unwrap();
let mut response = String::new();
req.read_to_string(&mut response).unwrap();
let mut lines = response.lines();
assert_eq!(lines.next(), Some("HTTP/1.1 200 Cool"));
let mut lines = lines.skip_while(|line| !line.is_empty());
assert_eq!(lines.next(), Some(""));
assert_eq!(lines.next(), None);
}
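// Outside the test harness, a handler could produce the same "HTTP/1.1 200 Cool" status
// line by inserting the new hyper::ext::ReasonPhrase extension directly. A minimal sketch
// (function name illustrative), using the same TryFrom<&[u8]> conversion the ReplyBuilder
// further down relies on:

use std::convert::TryInto;
use hyper::{Body, Response};

fn reply_with_custom_reason() -> Response<Body> {
    let mut res = Response::new(Body::empty());
    // try_into can fail for bytes that are not a valid reason phrase, hence the expect.
    let reason: hyper::ext::ReasonPhrase = (&b"Cool"[..])
        .try_into()
        .expect("bytes form a valid reason phrase");
    res.extensions_mut().insert(reason);
    res
}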
#[test]
fn get_chunked_response_with_ka() {
let foo_bar = b"foo bar baz";
@@ -1816,6 +1842,7 @@ async fn h2_connect() {
#[tokio::test]
async fn h2_connect_multiplex() {
use futures_util::stream::FuturesUnordered;
use futures_util::StreamExt;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
let _ = pretty_env_logger::try_init();
@@ -2164,17 +2191,17 @@ async fn max_buf_size() {
.expect_err("should TooLarge error");
}
#[cfg(feature = "stream")]
#[test]
fn streaming_body() {
use futures_util::StreamExt;
let _ = pretty_env_logger::try_init();
// disable keep-alive so we can use read_to_end
let server = serve_opts().keep_alive(false).serve();
static S: &[&[u8]] = &[&[b'x'; 1_000] as &[u8]; 1_00] as _;
let b = futures_util::stream::iter(S.iter()).map(|&s| Ok::<_, hyper::Error>(s));
let b = hyper::Body::wrap_stream(b);
static S: &[&[u8]] = &[&[b'x'; 1_000] as &[u8]; 100] as _;
let b =
futures_util::stream::iter(S.iter()).map(|&s| Ok::<_, BoxError>(Bytes::copy_from_slice(s)));
server.reply().body_stream(b);
let mut tcp = connect(server.addr());
@@ -2272,18 +2299,15 @@ async fn http2_service_error_sends_reset_reason() {
assert_eq!(h2_err.reason(), Some(h2::Reason::INADEQUATE_SECURITY));
}
#[cfg(feature = "stream")]
#[test]
fn http2_body_user_error_sends_reset_reason() {
use std::error::Error;
let server = serve();
let addr_str = format!("http://{}", server.addr());
let b = futures_util::stream::once(future::err::<String, _>(h2::Error::from(
let b = futures_util::stream::once(future::err::<Bytes, BoxError>(Box::new(h2::Error::from(
h2::Reason::INADEQUATE_SECURITY,
)));
let b = hyper::Body::wrap_stream(b);
))));
server.reply().body_stream(b);
let rt = support::runtime();
@@ -2297,7 +2321,7 @@ fn http2_body_user_error_sends_reset_reason() {
let mut res = client.get(uri).await?;
while let Some(chunk) = res.body_mut().next().await {
while let Some(chunk) = res.body_mut().data().await {
chunk?;
}
Ok(())
@@ -2421,6 +2445,26 @@ fn skips_content_length_and_body_for_304_responses() {
assert_eq!(lines.next(), None);
}
#[test]
fn no_implicit_zero_content_length_for_head_responses() {
let server = serve();
server.reply().status(hyper::StatusCode::OK).body([]);
let mut req = connect(server.addr());
req.write_all(
b"\
HEAD / HTTP/1.1\r\n\
Host: example.domain\r\n\
Connection: close\r\n\
\r\n\
",
)
.unwrap();
let mut response = String::new();
req.read_to_string(&mut response).unwrap();
assert!(!response.contains("content-length:"));
}
#[tokio::test]
async fn http2_keep_alive_detects_unresponsive_client() {
let _ = pretty_env_logger::try_init();
@@ -2639,7 +2683,7 @@ impl Serve {
}
type BoxError = Box<dyn std::error::Error + Send + Sync>;
type BoxFuture = Pin<Box<dyn Future<Output = Result<Response<Body>, BoxError>> + Send>>;
type BoxFuture = Pin<Box<dyn Future<Output = Result<Response<ReplyBody>, BoxError>> + Send>>;
struct ReplyBuilder<'a> {
tx: &'a Mutex<spmc::Sender<Reply>>,
@@ -2651,6 +2695,17 @@ impl<'a> ReplyBuilder<'a> {
self
}
fn reason_phrase(self, reason: &str) -> Self {
self.tx
.lock()
.unwrap()
.send(Reply::ReasonPhrase(
reason.as_bytes().try_into().expect("reason phrase"),
))
.unwrap();
self
}
fn version(self, version: hyper::Version) -> Self {
self.tx
.lock()
@@ -2672,14 +2727,16 @@ impl<'a> ReplyBuilder<'a> {
}
fn body<T: AsRef<[u8]>>(self, body: T) {
self.tx
.lock()
.unwrap()
.send(Reply::Body(body.as_ref().to_vec().into()))
.unwrap();
let chunk = Bytes::copy_from_slice(body.as_ref());
let body = BodyExt::boxed(http_body_util::Full::new(chunk).map_err(|e| match e {}));
self.tx.lock().unwrap().send(Reply::Body(body)).unwrap();
}
fn body_stream(self, body: Body) {
fn body_stream<S>(self, stream: S)
where
S: futures_util::Stream<Item = Result<Bytes, BoxError>> + Send + Sync + 'static,
{
let body = BodyExt::boxed(StreamBody::new(stream));
self.tx.lock().unwrap().send(Reply::Body(body)).unwrap();
}
@@ -2721,12 +2778,15 @@ struct TestService {
reply: spmc::Receiver<Reply>,
}
type ReplyBody = BoxBody<Bytes, BoxError>;
#[derive(Debug)]
enum Reply {
Status(hyper::StatusCode),
ReasonPhrase(hyper::ext::ReasonPhrase),
Version(hyper::Version),
Header(HeaderName, HeaderValue),
Body(hyper::Body),
Body(ReplyBody),
Error(BoxError),
End,
}
@@ -2739,7 +2799,7 @@ enum Msg {
}
impl tower_service::Service<Request<Body>> for TestService {
type Response = Response<Body>;
type Response = Response<ReplyBody>;
type Error = BoxError;
type Future = BoxFuture;
@@ -2772,13 +2832,18 @@ impl tower_service::Service<Request<Body>> for TestService {
}
impl TestService {
fn build_reply(replies: spmc::Receiver<Reply>) -> Result<Response<Body>, BoxError> {
let mut res = Response::new(Body::empty());
fn build_reply(replies: spmc::Receiver<Reply>) -> Result<Response<ReplyBody>, BoxError> {
let empty =
BodyExt::boxed(http_body_util::Empty::new().map_err(|e| -> BoxError { match e {} }));
let mut res = Response::new(empty);
while let Ok(reply) = replies.try_recv() {
match reply {
Reply::Status(s) => {
*res.status_mut() = s;
}
Reply::ReasonPhrase(reason) => {
res.extensions_mut().insert(reason);
}
Reply::Version(v) => {
*res.version_mut() = v;
}
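// The map_err(|e| -> BoxError { match e {} }) calls above coerce a body whose error type
// is Infallible into the BoxError error type that ReplyBody uses. The idiom in isolation
// (names illustrative):

use bytes::Bytes;
use http_body_util::{combinators::BoxBody, BodyExt, Empty};

// Mirrors the BoxError alias defined earlier in this test file.
type BoxError = Box<dyn std::error::Error + Send + Sync>;

fn empty_reply_body() -> BoxBody<Bytes, BoxError> {
    // Empty's error is Infallible, so this match has no arms; the resulting `!`
    // coerces to BoxError, and no error value can ever actually be produced.
    BodyExt::boxed(Empty::<Bytes>::new().map_err(|never| -> BoxError { match never {} }))
}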
@@ -2817,7 +2882,7 @@ impl tower_service::Service<Request<Body>> for HelloWorld {
fn unreachable_service() -> impl tower_service::Service<
http::Request<hyper::Body>,
Response = http::Response<hyper::Body>,
Response = http::Response<ReplyBody>,
Error = BoxError,
Future = BoxFuture,
> {