From 294000c0745c64009151a1ab39978cd6f02dfd68 Mon Sep 17 00:00:00 2001 From: Vitaly Shukela Date: Sat, 29 Oct 2022 21:22:48 +0300 Subject: [PATCH 01/56] Fix docs of enable_push (#646) Remove redundant and misleading phrase in client::Builder::enable_push documentation. Resolves #645 --- src/client.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client.rs b/src/client.rs index a6c649811..411afa5e0 100644 --- a/src/client.rs +++ b/src/client.rs @@ -986,8 +986,8 @@ impl Builder { /// Enables or disables server push promises. /// - /// This value is included in the initial SETTINGS handshake. When set, the - /// server MUST NOT send a push promise. Setting this value to value to + /// This value is included in the initial SETTINGS handshake. + /// Setting this value to value to /// false in the initial SETTINGS handshake guarantees that the remote server /// will never send a push promise. /// From af47a086b6bb5d2d216301bf9673dee6e95ab4c3 Mon Sep 17 00:00:00 2001 From: silence-coding <32766901+silence-coding@users.noreply.github.com> Date: Fri, 2 Dec 2022 23:07:57 +0800 Subject: [PATCH 02/56] fix issue#648: drop frame if stream is released (#651) Co-authored-by: p00512853 --- src/proto/streams/recv.rs | 10 ++++++++++ src/proto/streams/stream.rs | 4 ++++ src/proto/streams/streams.rs | 3 ++- 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index 3af1af3a1..21c575a1a 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -600,6 +600,16 @@ impl Recv { } } + // Received a frame, but no one cared about it. fix issue#648 + if !stream.is_recv { + tracing::trace!( + "recv_data; frame ignored on stream release {:?} for some time", + stream.id, + ); + self.release_connection_capacity(sz, &mut None); + return Ok(()); + } + // Update stream level flow control stream.recv_flow.send_data(sz); diff --git a/src/proto/streams/stream.rs b/src/proto/streams/stream.rs index 36d515bad..de7f4f641 100644 --- a/src/proto/streams/stream.rs +++ b/src/proto/streams/stream.rs @@ -99,6 +99,9 @@ pub(super) struct Stream { /// Frames pending for this stream to read pub pending_recv: buffer::Deque, + /// When the RecvStream drop occurs, no data should be received. + pub is_recv: bool, + /// Task tracking receiving frames pub recv_task: Option, @@ -180,6 +183,7 @@ impl Stream { reset_at: None, next_reset_expire: None, pending_recv: buffer::Deque::new(), + is_recv: true, recv_task: None, pending_push_promises: store::Queue::new(), content_length: ContentLength::Omitted, diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index aee64ca6a..4bd671b07 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -1345,12 +1345,13 @@ impl OpaqueStreamRef { .release_capacity(capacity, &mut stream, &mut me.actions.task) } + /// Clear the receive queue and set the status to no longer receive data frames. 
pub(crate) fn clear_recv_buffer(&mut self) { let mut me = self.inner.lock().unwrap(); let me = &mut *me; let mut stream = me.store.resolve(self.key); - + stream.is_recv = false; me.actions.recv.clear_recv_buffer(&mut stream); } From c1ce37e1678af6186b2a12574e5bd1ef1ad39c26 Mon Sep 17 00:00:00 2001 From: gtsiam Date: Mon, 12 Dec 2022 19:18:35 +0200 Subject: [PATCH 03/56] Remove unnecessary Unpin + 'static bounds on body (#649) --- src/client.rs | 10 +++++----- src/proto/streams/streams.rs | 5 +---- src/server.rs | 8 ++++---- tests/h2-support/src/client_ext.rs | 2 +- tests/h2-support/src/prelude.rs | 2 +- 5 files changed, 12 insertions(+), 15 deletions(-) diff --git a/src/client.rs b/src/client.rs index 411afa5e0..5dd0b0f87 100644 --- a/src/client.rs +++ b/src/client.rs @@ -341,7 +341,7 @@ pub(crate) struct Peer; impl SendRequest where - B: Buf + 'static, + B: Buf, { /// Returns `Ready` when the connection can initialize a new HTTP/2 /// stream. @@ -584,7 +584,7 @@ where impl Future for ReadySendRequest where - B: Buf + 'static, + B: Buf, { type Output = Result, crate::Error>; @@ -1100,7 +1100,7 @@ impl Builder { ) -> impl Future, Connection), crate::Error>> where T: AsyncRead + AsyncWrite + Unpin, - B: Buf + 'static, + B: Buf, { Connection::handshake2(io, self.clone()) } @@ -1177,7 +1177,7 @@ where impl Connection where T: AsyncRead + AsyncWrite + Unpin, - B: Buf + 'static, + B: Buf, { async fn handshake2( mut io: T, @@ -1306,7 +1306,7 @@ where impl Future for Connection where T: AsyncRead + AsyncWrite + Unpin, - B: Buf + 'static, + B: Buf, { type Output = Result<(), crate::Error>; diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index 4bd671b07..62c55524c 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -1229,10 +1229,7 @@ impl StreamRef { .map_err(From::from) } - pub fn clone_to_opaque(&self) -> OpaqueStreamRef - where - B: 'static, - { + pub fn clone_to_opaque(&self) -> OpaqueStreamRef { self.opaque.clone() } diff --git a/src/server.rs b/src/server.rs index 9f56f184a..6e216a40a 100644 --- a/src/server.rs +++ b/src/server.rs @@ -364,7 +364,7 @@ where impl Connection where T: AsyncRead + AsyncWrite + Unpin, - B: Buf + 'static, + B: Buf, { fn handshake2(io: T, builder: Builder) -> Handshake { let span = tracing::trace_span!("server_handshake"); @@ -582,7 +582,7 @@ where impl futures_core::Stream for Connection where T: AsyncRead + AsyncWrite + Unpin, - B: Buf + 'static, + B: Buf, { type Item = Result<(Request, SendResponse), crate::Error>; @@ -1007,7 +1007,7 @@ impl Builder { pub fn handshake(&self, io: T) -> Handshake where T: AsyncRead + AsyncWrite + Unpin, - B: Buf + 'static, + B: Buf, { Connection::handshake2(io, self.clone()) } @@ -1262,7 +1262,7 @@ where impl Future for Handshake where T: AsyncRead + AsyncWrite + Unpin, - B: Buf + 'static, + B: Buf, { type Output = Result, crate::Error>; diff --git a/tests/h2-support/src/client_ext.rs b/tests/h2-support/src/client_ext.rs index a9ab71d99..eebbae98b 100644 --- a/tests/h2-support/src/client_ext.rs +++ b/tests/h2-support/src/client_ext.rs @@ -11,7 +11,7 @@ pub trait SendRequestExt { impl SendRequestExt for SendRequest where - B: Buf + Unpin + 'static, + B: Buf, { fn get(&mut self, uri: &str) -> ResponseFuture { let req = Request::builder() diff --git a/tests/h2-support/src/prelude.rs b/tests/h2-support/src/prelude.rs index 86ef3249e..d34f1b996 100644 --- a/tests/h2-support/src/prelude.rs +++ b/tests/h2-support/src/prelude.rs @@ -90,7 +90,7 @@ pub trait ClientExt { impl ClientExt for 
client::Connection where T: AsyncRead + AsyncWrite + Unpin + 'static, - B: Buf + Unpin + 'static, + B: Buf, { fn run<'a, F: Future + Unpin + 'a>( &'a mut self, From 07d20b19abfd3bf57bd80976089e3c24a3166bca Mon Sep 17 00:00:00 2001 From: gtsiam Date: Mon, 12 Dec 2022 22:13:48 +0200 Subject: [PATCH 04/56] Fix all clippy warnings (#652) --- examples/akamai.rs | 5 +- src/codec/framed_read.rs | 10 +-- src/frame/data.rs | 8 +- src/frame/headers.rs | 13 +-- src/frame/settings.rs | 6 +- src/hpack/decoder.rs | 29 +++---- src/hpack/encoder.rs | 8 +- src/hpack/header.rs | 26 +++--- src/hpack/huffman/mod.rs | 9 +- src/hpack/table.rs | 2 +- src/hpack/test/fixture.rs | 10 +-- src/hpack/test/fuzz.rs | 2 +- src/lib.rs | 1 + src/proto/connection.rs | 2 +- src/proto/error.rs | 4 +- src/proto/ping_pong.rs | 5 +- src/proto/streams/flow_control.rs | 3 +- src/proto/streams/mod.rs | 1 + src/proto/streams/prioritize.rs | 68 ++++++++------- src/proto/streams/recv.rs | 78 +++++++++--------- src/proto/streams/send.rs | 110 +++++++++++++------------ src/proto/streams/state.rs | 69 +++++++--------- src/proto/streams/store.rs | 12 ++- src/proto/streams/stream.rs | 11 +-- src/proto/streams/streams.rs | 24 +++--- src/server.rs | 2 +- src/share.rs | 4 +- tests/h2-support/src/frames.rs | 4 +- tests/h2-support/src/mock.rs | 4 +- tests/h2-support/src/prelude.rs | 2 +- tests/h2-support/src/util.rs | 4 +- tests/h2-tests/tests/client_request.rs | 9 +- tests/h2-tests/tests/hammer.rs | 2 +- tests/h2-tests/tests/ping_pong.rs | 6 +- tests/h2-tests/tests/push_promise.rs | 2 +- tests/h2-tests/tests/server.rs | 4 +- tests/h2-tests/tests/stream_states.rs | 2 +- util/genfixture/src/main.rs | 12 +-- util/genhuff/src/main.rs | 10 +-- 39 files changed, 283 insertions(+), 300 deletions(-) diff --git a/examples/akamai.rs b/examples/akamai.rs index e522b37ff..1d0b17baf 100644 --- a/examples/akamai.rs +++ b/examples/akamai.rs @@ -50,10 +50,7 @@ pub async fn main() -> Result<(), Box> { { let (_, session) = tls.get_ref(); let negotiated_protocol = session.alpn_protocol(); - assert_eq!( - Some(ALPN_H2.as_bytes()), - negotiated_protocol.as_ref().map(|x| &**x) - ); + assert_eq!(Some(ALPN_H2.as_bytes()), negotiated_protocol); } println!("Starting client handshake"); diff --git a/src/codec/framed_read.rs b/src/codec/framed_read.rs index 7c3bbb3ba..a874d7732 100644 --- a/src/codec/framed_read.rs +++ b/src/codec/framed_read.rs @@ -109,7 +109,7 @@ fn decode_frame( if partial_inout.is_some() && head.kind() != Kind::Continuation { proto_err!(conn: "expected CONTINUATION, got {:?}", head.kind()); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into()); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } let kind = head.kind(); @@ -231,7 +231,7 @@ fn decode_frame( if head.stream_id() == 0 { // Invalid stream identifier proto_err!(conn: "invalid stream ID 0"); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into()); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } match frame::Priority::load(head, &bytes[frame::HEADER_LEN..]) { @@ -257,14 +257,14 @@ fn decode_frame( Some(partial) => partial, None => { proto_err!(conn: "received unexpected CONTINUATION frame"); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into()); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } }; // The stream identifiers must match if partial.frame.stream_id() != head.stream_id() { proto_err!(conn: "CONTINUATION frame stream ID does not match previous frame stream ID"); - return 
Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into()); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } // Extend the buf @@ -287,7 +287,7 @@ fn decode_frame( // the attacker to go away. if partial.buf.len() + bytes.len() > max_header_list_size { proto_err!(conn: "CONTINUATION frame header block size over ignorable limit"); - return Err(Error::library_go_away(Reason::COMPRESSION_ERROR).into()); + return Err(Error::library_go_away(Reason::COMPRESSION_ERROR)); } } partial.buf.extend_from_slice(&bytes[frame::HEADER_LEN..]); diff --git a/src/frame/data.rs b/src/frame/data.rs index e253d5e23..d0cdf5f69 100644 --- a/src/frame/data.rs +++ b/src/frame/data.rs @@ -16,7 +16,7 @@ pub struct Data { pad_len: Option, } -#[derive(Copy, Clone, Eq, PartialEq)] +#[derive(Copy, Clone, Default, Eq, PartialEq)] struct DataFlags(u8); const END_STREAM: u8 = 0x1; @@ -211,12 +211,6 @@ impl DataFlags { } } -impl Default for DataFlags { - fn default() -> Self { - DataFlags(0) - } -} - impl From for u8 { fn from(src: DataFlags) -> u8 { src.0 diff --git a/src/frame/headers.rs b/src/frame/headers.rs index bcb905013..9d5c8cefe 100644 --- a/src/frame/headers.rs +++ b/src/frame/headers.rs @@ -309,17 +309,20 @@ impl fmt::Debug for Headers { // ===== util ===== -pub fn parse_u64(src: &[u8]) -> Result { +#[derive(Debug, PartialEq, Eq)] +pub struct ParseU64Error; + +pub fn parse_u64(src: &[u8]) -> Result { if src.len() > 19 { // At danger for overflow... - return Err(()); + return Err(ParseU64Error); } let mut ret = 0; for &d in src { if d < b'0' || d > b'9' { - return Err(()); + return Err(ParseU64Error); } ret *= 10; @@ -333,7 +336,7 @@ pub fn parse_u64(src: &[u8]) -> Result { #[derive(Debug)] pub enum PushPromiseHeaderError { - InvalidContentLength(Result), + InvalidContentLength(Result), NotSafeAndCacheable, } @@ -381,7 +384,7 @@ impl PushPromise { fn safe_and_cacheable(method: &Method) -> bool { // Cacheable: https://httpwg.org/specs/rfc7231.html#cacheable.methods // Safe: https://httpwg.org/specs/rfc7231.html#safe.methods - return method == Method::GET || method == Method::HEAD; + method == Method::GET || method == Method::HEAD } pub fn fields(&self) -> &HeaderMap { diff --git a/src/frame/settings.rs b/src/frame/settings.rs index 080d0f4e5..0c913f059 100644 --- a/src/frame/settings.rs +++ b/src/frame/settings.rs @@ -182,10 +182,10 @@ impl Settings { } } Some(MaxFrameSize(val)) => { - if val < DEFAULT_MAX_FRAME_SIZE || val > MAX_MAX_FRAME_SIZE { - return Err(Error::InvalidSettingValue); - } else { + if DEFAULT_MAX_FRAME_SIZE <= val && val <= MAX_MAX_FRAME_SIZE { settings.max_frame_size = Some(val); + } else { + return Err(Error::InvalidSettingValue); } } Some(MaxHeaderListSize(val)) => { diff --git a/src/hpack/decoder.rs b/src/hpack/decoder.rs index 988b48db1..b45c37927 100644 --- a/src/hpack/decoder.rs +++ b/src/hpack/decoder.rs @@ -852,8 +852,7 @@ mod test { fn test_decode_empty() { let mut de = Decoder::new(0); let mut buf = BytesMut::new(); - let empty = de.decode(&mut Cursor::new(&mut buf), |_| {}).unwrap(); - assert_eq!(empty, ()); + let _: () = de.decode(&mut Cursor::new(&mut buf), |_| {}).unwrap(); } #[test] @@ -861,17 +860,16 @@ mod test { let mut de = Decoder::new(0); let mut buf = BytesMut::new(); - buf.extend(&[0b01000000, 0x80 | 2]); + buf.extend([0b01000000, 0x80 | 2]); buf.extend(huff_encode(b"foo")); - buf.extend(&[0x80 | 3]); + buf.extend([0x80 | 3]); buf.extend(huff_encode(b"bar")); let mut res = vec![]; - let _ = de - .decode(&mut Cursor::new(&mut buf), |h| { - res.push(h); - }) - 
.unwrap(); + de.decode(&mut Cursor::new(&mut buf), |h| { + res.push(h); + }) + .unwrap(); assert_eq!(res.len(), 1); assert_eq!(de.table.size(), 0); @@ -900,10 +898,10 @@ mod test { let value = huff_encode(b"bar"); let mut buf = BytesMut::new(); // header name is non_huff encoded - buf.extend(&[0b01000000, 0x00 | 3]); + buf.extend([0b01000000, 3]); buf.extend(b"foo"); // header value is partial - buf.extend(&[0x80 | 3]); + buf.extend([0x80 | 3]); buf.extend(&value[0..1]); let mut res = vec![]; @@ -917,11 +915,10 @@ mod test { // extend buf with the remaining header value buf.extend(&value[1..]); - let _ = de - .decode(&mut Cursor::new(&mut buf), |h| { - res.push(h); - }) - .unwrap(); + de.decode(&mut Cursor::new(&mut buf), |h| { + res.push(h); + }) + .unwrap(); assert_eq!(res.len(), 1); assert_eq!(de.table.size(), 0); diff --git a/src/hpack/encoder.rs b/src/hpack/encoder.rs index 76b373830..d121a2aaf 100644 --- a/src/hpack/encoder.rs +++ b/src/hpack/encoder.rs @@ -118,12 +118,12 @@ impl Encoder { encode_int(idx, 7, 0x80, dst); } Index::Name(idx, _) => { - let header = self.table.resolve(&index); + let header = self.table.resolve(index); encode_not_indexed(idx, header.value_slice(), header.is_sensitive(), dst); } Index::Inserted(_) => { - let header = self.table.resolve(&index); + let header = self.table.resolve(index); assert!(!header.is_sensitive()); @@ -133,7 +133,7 @@ impl Encoder { encode_str(header.value_slice(), dst); } Index::InsertedValue(idx, _) => { - let header = self.table.resolve(&index); + let header = self.table.resolve(index); assert!(!header.is_sensitive()); @@ -141,7 +141,7 @@ impl Encoder { encode_str(header.value_slice(), dst); } Index::NotIndexed(_) => { - let header = self.table.resolve(&index); + let header = self.table.resolve(index); encode_not_indexed2( header.name().as_slice(), diff --git a/src/hpack/header.rs b/src/hpack/header.rs index e6df555ab..0b5d1fded 100644 --- a/src/hpack/header.rs +++ b/src/hpack/header.rs @@ -190,18 +190,18 @@ impl Header { use http::header; match *self { - Header::Field { ref name, .. } => match *name { + Header::Field { ref name, .. } => matches!( + *name, header::AGE - | header::AUTHORIZATION - | header::CONTENT_LENGTH - | header::ETAG - | header::IF_MODIFIED_SINCE - | header::IF_NONE_MATCH - | header::LOCATION - | header::COOKIE - | header::SET_COOKIE => true, - _ => false, - }, + | header::AUTHORIZATION + | header::CONTENT_LENGTH + | header::ETAG + | header::IF_MODIFIED_SINCE + | header::IF_NONE_MATCH + | header::LOCATION + | header::COOKIE + | header::SET_COOKIE + ), Header::Path(..) 
=> true, _ => false, } @@ -231,10 +231,10 @@ impl<'a> Name<'a> { match self { Name::Field(name) => Ok(Header::Field { name: name.clone(), - value: HeaderValue::from_bytes(&*value)?, + value: HeaderValue::from_bytes(&value)?, }), Name::Authority => Ok(Header::Authority(BytesStr::try_from(value)?)), - Name::Method => Ok(Header::Method(Method::from_bytes(&*value)?)), + Name::Method => Ok(Header::Method(Method::from_bytes(&value)?)), Name::Scheme => Ok(Header::Scheme(BytesStr::try_from(value)?)), Name::Path => Ok(Header::Path(BytesStr::try_from(value)?)), Name::Protocol => Ok(Header::Protocol(Protocol::try_from(value)?)), diff --git a/src/hpack/huffman/mod.rs b/src/hpack/huffman/mod.rs index 07b3fd925..86c97eb58 100644 --- a/src/hpack/huffman/mod.rs +++ b/src/hpack/huffman/mod.rs @@ -112,7 +112,7 @@ mod test { #[test] fn decode_single_byte() { assert_eq!("o", decode(&[0b00111111]).unwrap()); - assert_eq!("0", decode(&[0x0 + 7]).unwrap()); + assert_eq!("0", decode(&[7]).unwrap()); assert_eq!("A", decode(&[(0x21 << 2) + 3]).unwrap()); } @@ -138,7 +138,7 @@ mod test { dst.clear(); encode(b"0", &mut dst); - assert_eq!(&dst[..], &[0x0 + 7]); + assert_eq!(&dst[..], &[7]); dst.clear(); encode(b"A", &mut dst); @@ -147,7 +147,7 @@ mod test { #[test] fn encode_decode_str() { - const DATA: &'static [&'static str] = &[ + const DATA: &[&str] = &[ "hello world", ":method", ":scheme", @@ -184,8 +184,7 @@ mod test { #[test] fn encode_decode_u8() { - const DATA: &'static [&'static [u8]] = - &[b"\0", b"\0\0\0", b"\0\x01\x02\x03\x04\x05", b"\xFF\xF8"]; + const DATA: &[&[u8]] = &[b"\0", b"\0\0\0", b"\0\x01\x02\x03\x04\x05", b"\xFF\xF8"]; for s in DATA { let mut dst = BytesMut::with_capacity(s.len()); diff --git a/src/hpack/table.rs b/src/hpack/table.rs index 0124f216d..a1a780451 100644 --- a/src/hpack/table.rs +++ b/src/hpack/table.rs @@ -404,7 +404,7 @@ impl Table { // Find the associated position probe_loop!(probe < self.indices.len(), { - debug_assert!(!self.indices[probe].is_none()); + debug_assert!(self.indices[probe].is_some()); let mut pos = self.indices[probe].unwrap(); diff --git a/src/hpack/test/fixture.rs b/src/hpack/test/fixture.rs index 3428c3958..0d33ca2de 100644 --- a/src/hpack/test/fixture.rs +++ b/src/hpack/test/fixture.rs @@ -52,8 +52,8 @@ fn test_story(story: Value) { Case { seqno: case.get("seqno").unwrap().as_u64().unwrap(), - wire: wire, - expect: expect, + wire, + expect, header_table_size: size, } }) @@ -142,10 +142,10 @@ fn key_str(e: &Header) -> &str { fn value_str(e: &Header) -> &str { match *e { Header::Field { ref value, .. 
} => value.to_str().unwrap(), - Header::Authority(ref v) => &**v, + Header::Authority(ref v) => v, Header::Method(ref m) => m.as_str(), - Header::Scheme(ref v) => &**v, - Header::Path(ref v) => &**v, + Header::Scheme(ref v) => v, + Header::Path(ref v) => v, Header::Protocol(ref v) => v.as_str(), Header::Status(ref v) => v.as_str(), } diff --git a/src/hpack/test/fuzz.rs b/src/hpack/test/fuzz.rs index ad0d47b6b..af9e8ea23 100644 --- a/src/hpack/test/fuzz.rs +++ b/src/hpack/test/fuzz.rs @@ -80,7 +80,7 @@ impl FuzzHpack { let high = rng.gen_range(128..MAX_CHUNK * 2); let low = rng.gen_range(0..high); - frame.resizes.extend(&[low, high]); + frame.resizes.extend([low, high]); } 1..=3 => { frame.resizes.push(rng.gen_range(128..MAX_CHUNK * 2)); diff --git a/src/lib.rs b/src/lib.rs index 376d15c9a..e7b95035f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -81,6 +81,7 @@ #![doc(html_root_url = "https://docs.rs/h2/0.3.15")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] +#![allow(clippy::type_complexity, clippy::manual_range_contains)] macro_rules! proto_err { (conn: $($msg:tt)+) => { diff --git a/src/proto/connection.rs b/src/proto/connection.rs index cd011a1d5..59883cf33 100644 --- a/src/proto/connection.rs +++ b/src/proto/connection.rs @@ -215,7 +215,7 @@ where }); match (ours, theirs) { - (Reason::NO_ERROR, Reason::NO_ERROR) => return Ok(()), + (Reason::NO_ERROR, Reason::NO_ERROR) => Ok(()), (ours, Reason::NO_ERROR) => Err(Error::GoAway(Bytes::new(), ours, initiator)), // If both sides reported an error, give their // error back to th user. We assume our error diff --git a/src/proto/error.rs b/src/proto/error.rs index 197237263..2c00c7ea6 100644 --- a/src/proto/error.rs +++ b/src/proto/error.rs @@ -13,7 +13,7 @@ pub enum Error { Io(io::ErrorKind, Option), } -#[derive(Clone, Copy, Debug, PartialEq)] +#[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum Initiator { User, Library, @@ -70,7 +70,7 @@ impl fmt::Display for Error { impl From for Error { fn from(src: io::ErrorKind) -> Self { - Error::Io(src.into(), None) + Error::Io(src, None) } } diff --git a/src/proto/ping_pong.rs b/src/proto/ping_pong.rs index 844c5fbb9..59023e26a 100644 --- a/src/proto/ping_pong.rs +++ b/src/proto/ping_pong.rs @@ -200,10 +200,7 @@ impl PingPong { impl ReceivedPing { pub(crate) fn is_shutdown(&self) -> bool { - match *self { - ReceivedPing::Shutdown => true, - _ => false, - } + matches!(*self, Self::Shutdown) } } diff --git a/src/proto/streams/flow_control.rs b/src/proto/streams/flow_control.rs index 4a47f08dd..b1b2745eb 100644 --- a/src/proto/streams/flow_control.rs +++ b/src/proto/streams/flow_control.rs @@ -19,6 +19,7 @@ const UNCLAIMED_NUMERATOR: i32 = 1; const UNCLAIMED_DENOMINATOR: i32 = 2; #[test] +#[allow(clippy::assertions_on_constants)] fn sanity_unclaimed_ratio() { assert!(UNCLAIMED_NUMERATOR < UNCLAIMED_DENOMINATOR); assert!(UNCLAIMED_NUMERATOR >= 0); @@ -188,7 +189,7 @@ impl FlowControl { /// /// This type tries to centralize the knowledge of addition and subtraction /// to this capacity, instead of having integer casts throughout the source. 
-#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd)] pub struct Window(i32); impl Window { diff --git a/src/proto/streams/mod.rs b/src/proto/streams/mod.rs index de2a2c85a..0ff8131c1 100644 --- a/src/proto/streams/mod.rs +++ b/src/proto/streams/mod.rs @@ -7,6 +7,7 @@ mod send; mod state; mod store; mod stream; +#[allow(clippy::module_inception)] mod streams; pub(crate) use self::prioritize::Prioritized; diff --git a/src/proto/streams/prioritize.rs b/src/proto/streams/prioritize.rs index c2904aca9..329e55022 100644 --- a/src/proto/streams/prioritize.rs +++ b/src/proto/streams/prioritize.rs @@ -7,9 +7,11 @@ use crate::codec::UserError; use crate::codec::UserError::*; use bytes::buf::{Buf, Take}; -use std::io; -use std::task::{Context, Poll, Waker}; -use std::{cmp, fmt, mem}; +use std::{ + cmp::{self, Ordering}, + fmt, io, mem, + task::{Context, Poll, Waker}, +}; /// # Warning /// @@ -235,39 +237,43 @@ impl Prioritize { // If it were less, then we could never send out the buffered data. let capacity = (capacity as usize) + stream.buffered_send_data; - if capacity == stream.requested_send_capacity as usize { - // Nothing to do - } else if capacity < stream.requested_send_capacity as usize { - // Update the target requested capacity - stream.requested_send_capacity = capacity as WindowSize; + match capacity.cmp(&(stream.requested_send_capacity as usize)) { + Ordering::Equal => { + // Nothing to do + } + Ordering::Less => { + // Update the target requested capacity + stream.requested_send_capacity = capacity as WindowSize; - // Currently available capacity assigned to the stream - let available = stream.send_flow.available().as_size(); + // Currently available capacity assigned to the stream + let available = stream.send_flow.available().as_size(); - // If the stream has more assigned capacity than requested, reclaim - // some for the connection - if available as usize > capacity { - let diff = available - capacity as WindowSize; + // If the stream has more assigned capacity than requested, reclaim + // some for the connection + if available as usize > capacity { + let diff = available - capacity as WindowSize; - stream.send_flow.claim_capacity(diff); + stream.send_flow.claim_capacity(diff); - self.assign_connection_capacity(diff, stream, counts); - } - } else { - // If trying to *add* capacity, but the stream send side is closed, - // there's nothing to be done. - if stream.state.is_send_closed() { - return; + self.assign_connection_capacity(diff, stream, counts); + } } + Ordering::Greater => { + // If trying to *add* capacity, but the stream send side is closed, + // there's nothing to be done. + if stream.state.is_send_closed() { + return; + } - // Update the target requested capacity - stream.requested_send_capacity = - cmp::min(capacity, WindowSize::MAX as usize) as WindowSize; + // Update the target requested capacity + stream.requested_send_capacity = + cmp::min(capacity, WindowSize::MAX as usize) as WindowSize; - // Try to assign additional capacity to the stream. If none is - // currently available, the stream will be queued to receive some - // when more becomes available. - self.try_assign_capacity(stream); + // Try to assign additional capacity to the stream. If none is + // currently available, the stream will be queued to receive some + // when more becomes available. 
+ self.try_assign_capacity(stream); + } } } @@ -372,11 +378,11 @@ impl Prioritize { continue; } - counts.transition(stream, |_, mut stream| { + counts.transition(stream, |_, stream| { // Try to assign capacity to the stream. This will also re-queue the // stream if there isn't enough connection level capacity to fulfill // the capacity request. - self.try_assign_capacity(&mut stream); + self.try_assign_capacity(stream); }) } } diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index 21c575a1a..497efc9bc 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -2,12 +2,12 @@ use super::*; use crate::codec::UserError; use crate::frame::{self, PushPromiseHeaderError, Reason, DEFAULT_INITIAL_WINDOW_SIZE}; use crate::proto::{self, Error}; -use std::task::Context; use http::{HeaderMap, Request, Response}; +use std::cmp::Ordering; use std::io; -use std::task::{Poll, Waker}; +use std::task::{Context, Poll, Waker}; use std::time::{Duration, Instant}; #[derive(Debug)] @@ -178,7 +178,7 @@ impl Recv { if let Some(content_length) = frame.fields().get(header::CONTENT_LENGTH) { let content_length = match frame::parse_u64(content_length.as_bytes()) { Ok(v) => v, - Err(()) => { + Err(_) => { proto_err!(stream: "could not parse content-length; stream={:?}", stream.id); return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR).into()); } @@ -221,11 +221,12 @@ impl Recv { let stream_id = frame.stream_id(); let (pseudo, fields) = frame.into_parts(); - if pseudo.protocol.is_some() { - if counts.peer().is_server() && !self.is_extended_connect_protocol_enabled { - proto_err!(stream: "cannot use :protocol if extended connect protocol is disabled; stream={:?}", stream.id); - return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR).into()); - } + if pseudo.protocol.is_some() + && counts.peer().is_server() + && !self.is_extended_connect_protocol_enabled + { + proto_err!(stream: "cannot use :protocol if extended connect protocol is disabled; stream={:?}", stream.id); + return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR).into()); } if !pseudo.is_informational() { @@ -487,28 +488,32 @@ impl Recv { // flow-controlled frames until it receives WINDOW_UPDATE frames that // cause the flow-control window to become positive. - if target < old_sz { - // We must decrease the (local) window on every open stream. - let dec = old_sz - target; - tracing::trace!("decrementing all windows; dec={}", dec); - - store.for_each(|mut stream| { - stream.recv_flow.dec_recv_window(dec); - }) - } else if target > old_sz { - // We must increase the (local) window on every open stream. - let inc = target - old_sz; - tracing::trace!("incrementing all windows; inc={}", inc); - store.try_for_each(|mut stream| { - // XXX: Shouldn't the peer have already noticed our - // overflow and sent us a GOAWAY? - stream - .recv_flow - .inc_window(inc) - .map_err(proto::Error::library_go_away)?; - stream.recv_flow.assign_capacity(inc); - Ok::<_, proto::Error>(()) - })?; + match target.cmp(&old_sz) { + Ordering::Less => { + // We must decrease the (local) window on every open stream. + let dec = old_sz - target; + tracing::trace!("decrementing all windows; dec={}", dec); + + store.for_each(|mut stream| { + stream.recv_flow.dec_recv_window(dec); + }) + } + Ordering::Greater => { + // We must increase the (local) window on every open stream. 
+ let inc = target - old_sz; + tracing::trace!("incrementing all windows; inc={}", inc); + store.try_for_each(|mut stream| { + // XXX: Shouldn't the peer have already noticed our + // overflow and sent us a GOAWAY? + stream + .recv_flow + .inc_window(inc) + .map_err(proto::Error::library_go_away)?; + stream.recv_flow.assign_capacity(inc); + Ok::<_, proto::Error>(()) + })?; + } + Ordering::Equal => (), } } @@ -556,7 +561,7 @@ impl Recv { "recv_data; frame ignored on locally reset {:?} for some time", stream.id, ); - return Ok(self.ignore_data(sz)?); + return self.ignore_data(sz); } // Ensure that there is enough capacity on the connection before acting @@ -596,7 +601,7 @@ impl Recv { if stream.state.recv_close().is_err() { proto_err!(conn: "recv_data: failed to transition to closed state; stream={:?}", stream.id); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into()); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } } @@ -766,7 +771,7 @@ impl Recv { } pub(super) fn clear_recv_buffer(&mut self, stream: &mut Stream) { - while let Some(_) = stream.pending_recv.pop_front(&mut self.buffer) { + while stream.pending_recv.pop_front(&mut self.buffer).is_some() { // drop it } } @@ -1089,12 +1094,7 @@ impl Recv { impl Open { pub fn is_push_promise(&self) -> bool { - use self::Open::*; - - match *self { - PushPromise => true, - _ => false, - } + matches!(*self, Self::PushPromise) } } diff --git a/src/proto/streams/send.rs b/src/proto/streams/send.rs index 2c5a38c80..38896a304 100644 --- a/src/proto/streams/send.rs +++ b/src/proto/streams/send.rs @@ -7,11 +7,11 @@ use crate::frame::{self, Reason}; use crate::proto::{Error, Initiator}; use bytes::Buf; -use http; -use std::task::{Context, Poll, Waker}; use tokio::io::AsyncWrite; +use std::cmp::Ordering; use std::io; +use std::task::{Context, Poll, Waker}; /// Manages state transitions related to outbound frames. #[derive(Debug)] @@ -456,57 +456,61 @@ impl Send { let old_val = self.init_window_sz; self.init_window_sz = val; - if val < old_val { - // We must decrease the (remote) window on every open stream. - let dec = old_val - val; - tracing::trace!("decrementing all windows; dec={}", dec); - - let mut total_reclaimed = 0; - store.for_each(|mut stream| { - let stream = &mut *stream; - - stream.send_flow.dec_send_window(dec); - - // It's possible that decreasing the window causes - // `window_size` (the stream-specific window) to fall below - // `available` (the portion of the connection-level window - // that we have allocated to the stream). - // In this case, we should take that excess allocation away - // and reassign it to other streams. - let window_size = stream.send_flow.window_size(); - let available = stream.send_flow.available().as_size(); - let reclaimed = if available > window_size { - // Drop down to `window_size`. - let reclaim = available - window_size; - stream.send_flow.claim_capacity(reclaim); - total_reclaimed += reclaim; - reclaim - } else { - 0 - }; - - tracing::trace!( - "decremented stream window; id={:?}; decr={}; reclaimed={}; flow={:?}", - stream.id, - dec, - reclaimed, - stream.send_flow - ); - - // TODO: Should this notify the producer when the capacity - // of a stream is reduced? Maybe it should if the capacity - // is reduced to zero, allowing the producer to stop work. 
- }); - - self.prioritize - .assign_connection_capacity(total_reclaimed, store, counts); - } else if val > old_val { - let inc = val - old_val; - - store.try_for_each(|mut stream| { - self.recv_stream_window_update(inc, buffer, &mut stream, counts, task) - .map_err(Error::library_go_away) - })?; + match val.cmp(&old_val) { + Ordering::Less => { + // We must decrease the (remote) window on every open stream. + let dec = old_val - val; + tracing::trace!("decrementing all windows; dec={}", dec); + + let mut total_reclaimed = 0; + store.for_each(|mut stream| { + let stream = &mut *stream; + + stream.send_flow.dec_send_window(dec); + + // It's possible that decreasing the window causes + // `window_size` (the stream-specific window) to fall below + // `available` (the portion of the connection-level window + // that we have allocated to the stream). + // In this case, we should take that excess allocation away + // and reassign it to other streams. + let window_size = stream.send_flow.window_size(); + let available = stream.send_flow.available().as_size(); + let reclaimed = if available > window_size { + // Drop down to `window_size`. + let reclaim = available - window_size; + stream.send_flow.claim_capacity(reclaim); + total_reclaimed += reclaim; + reclaim + } else { + 0 + }; + + tracing::trace!( + "decremented stream window; id={:?}; decr={}; reclaimed={}; flow={:?}", + stream.id, + dec, + reclaimed, + stream.send_flow + ); + + // TODO: Should this notify the producer when the capacity + // of a stream is reduced? Maybe it should if the capacity + // is reduced to zero, allowing the producer to stop work. + }); + + self.prioritize + .assign_connection_capacity(total_reclaimed, store, counts); + } + Ordering::Greater => { + let inc = val - old_val; + + store.try_for_each(|mut stream| { + self.recv_stream_window_update(inc, buffer, &mut stream, counts, task) + .map_err(Error::library_go_away) + })?; + } + Ordering::Equal => (), } } diff --git a/src/proto/streams/state.rs b/src/proto/streams/state.rs index 9931d41b1..1233e2352 100644 --- a/src/proto/streams/state.rs +++ b/src/proto/streams/state.rs @@ -343,10 +343,7 @@ impl State { } pub fn is_scheduled_reset(&self) -> bool { - match self.inner { - Closed(Cause::ScheduledLibraryReset(..)) => true, - _ => false, - } + matches!(self.inner, Closed(Cause::ScheduledLibraryReset(..))) } pub fn is_local_reset(&self) -> bool { @@ -367,65 +364,57 @@ impl State { } pub fn is_send_streaming(&self) -> bool { - match self.inner { + matches!( + self.inner, Open { - local: Streaming, .. - } => true, - HalfClosedRemote(Streaming) => true, - _ => false, - } + local: Streaming, + .. + } | HalfClosedRemote(Streaming) + ) } /// Returns true when the stream is in a state to receive headers pub fn is_recv_headers(&self) -> bool { - match self.inner { - Idle => true, - Open { + matches!( + self.inner, + Idle | Open { remote: AwaitingHeaders, .. - } => true, - HalfClosedLocal(AwaitingHeaders) => true, - ReservedRemote => true, - _ => false, - } + } | HalfClosedLocal(AwaitingHeaders) + | ReservedRemote + ) } pub fn is_recv_streaming(&self) -> bool { - match self.inner { + matches!( + self.inner, Open { - remote: Streaming, .. - } => true, - HalfClosedLocal(Streaming) => true, - _ => false, - } + remote: Streaming, + .. + } | HalfClosedLocal(Streaming) + ) } pub fn is_closed(&self) -> bool { - match self.inner { - Closed(_) => true, - _ => false, - } + matches!(self.inner, Closed(_)) } pub fn is_recv_closed(&self) -> bool { - match self.inner { - Closed(..) 
| HalfClosedRemote(..) | ReservedLocal => true, - _ => false, - } + matches!( + self.inner, + Closed(..) | HalfClosedRemote(..) | ReservedLocal + ) } pub fn is_send_closed(&self) -> bool { - match self.inner { - Closed(..) | HalfClosedLocal(..) | ReservedRemote => true, - _ => false, - } + matches!( + self.inner, + Closed(..) | HalfClosedLocal(..) | ReservedRemote + ) } pub fn is_idle(&self) -> bool { - match self.inner { - Idle => true, - _ => false, - } + matches!(self.inner, Idle) } pub fn ensure_recv_open(&self) -> Result { diff --git a/src/proto/streams/store.rs b/src/proto/streams/store.rs index 3e34b7cb2..d33a01cce 100644 --- a/src/proto/streams/store.rs +++ b/src/proto/streams/store.rs @@ -1,7 +1,5 @@ use super::*; -use slab; - use indexmap::{self, IndexMap}; use std::convert::Infallible; @@ -302,15 +300,15 @@ where let mut stream = store.resolve(idxs.head); if idxs.head == idxs.tail { - assert!(N::next(&*stream).is_none()); + assert!(N::next(&stream).is_none()); self.indices = None; } else { - idxs.head = N::take_next(&mut *stream).unwrap(); + idxs.head = N::take_next(&mut stream).unwrap(); self.indices = Some(idxs); } - debug_assert!(N::is_queued(&*stream)); - N::set_queued(&mut *stream, false); + debug_assert!(N::is_queued(&stream)); + N::set_queued(&mut stream, false); return Some(stream); } @@ -347,7 +345,7 @@ impl<'a> Ptr<'a> { } pub fn store_mut(&mut self) -> &mut Store { - &mut self.store + self.store } /// Remove the stream from the store diff --git a/src/proto/streams/stream.rs b/src/proto/streams/stream.rs index de7f4f641..68a29828c 100644 --- a/src/proto/streams/stream.rs +++ b/src/proto/streams/stream.rs @@ -252,7 +252,7 @@ impl Stream { // The stream is not in any queue !self.is_pending_send && !self.is_pending_send_capacity && !self.is_pending_accept && !self.is_pending_window_update && - !self.is_pending_open && !self.reset_at.is_some() + !self.is_pending_open && self.reset_at.is_none() } /// Returns true when the consumer of the stream has dropped all handles @@ -379,7 +379,7 @@ impl store::Next for NextSend { if val { // ensure that stream is not queued for being opened // if it's being put into queue for sending data - debug_assert_eq!(stream.is_pending_open, false); + debug_assert!(!stream.is_pending_open); } stream.is_pending_send = val; } @@ -450,7 +450,7 @@ impl store::Next for NextOpen { if val { // ensure that stream is not queued for being sent // if it's being put into queue for opening the stream - debug_assert_eq!(stream.is_pending_send, false); + debug_assert!(!stream.is_pending_send); } stream.is_pending_open = val; } @@ -486,9 +486,6 @@ impl store::Next for NextResetExpire { impl ContentLength { pub fn is_head(&self) -> bool { - match *self { - ContentLength::Head => true, - _ => false, - } + matches!(*self, Self::Head) } } diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index 62c55524c..01bdcdd70 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -312,29 +312,29 @@ impl DynStreams<'_, B> { pub fn recv_headers(&mut self, frame: frame::Headers) -> Result<(), Error> { let mut me = self.inner.lock().unwrap(); - me.recv_headers(self.peer, &self.send_buffer, frame) + me.recv_headers(self.peer, self.send_buffer, frame) } pub fn recv_data(&mut self, frame: frame::Data) -> Result<(), Error> { let mut me = self.inner.lock().unwrap(); - me.recv_data(self.peer, &self.send_buffer, frame) + me.recv_data(self.peer, self.send_buffer, frame) } pub fn recv_reset(&mut self, frame: frame::Reset) -> Result<(), Error> 
{ let mut me = self.inner.lock().unwrap(); - me.recv_reset(&self.send_buffer, frame) + me.recv_reset(self.send_buffer, frame) } /// Notify all streams that a connection-level error happened. pub fn handle_error(&mut self, err: proto::Error) -> StreamId { let mut me = self.inner.lock().unwrap(); - me.handle_error(&self.send_buffer, err) + me.handle_error(self.send_buffer, err) } pub fn recv_go_away(&mut self, frame: &frame::GoAway) -> Result<(), Error> { let mut me = self.inner.lock().unwrap(); - me.recv_go_away(&self.send_buffer, frame) + me.recv_go_away(self.send_buffer, frame) } pub fn last_processed_id(&self) -> StreamId { @@ -343,22 +343,22 @@ impl DynStreams<'_, B> { pub fn recv_window_update(&mut self, frame: frame::WindowUpdate) -> Result<(), Error> { let mut me = self.inner.lock().unwrap(); - me.recv_window_update(&self.send_buffer, frame) + me.recv_window_update(self.send_buffer, frame) } pub fn recv_push_promise(&mut self, frame: frame::PushPromise) -> Result<(), Error> { let mut me = self.inner.lock().unwrap(); - me.recv_push_promise(&self.send_buffer, frame) + me.recv_push_promise(self.send_buffer, frame) } pub fn recv_eof(&mut self, clear_pending_accept: bool) -> Result<(), ()> { let mut me = self.inner.lock().map_err(|_| ())?; - me.recv_eof(&self.send_buffer, clear_pending_accept) + me.recv_eof(self.send_buffer, clear_pending_accept) } pub fn send_reset(&mut self, id: StreamId, reason: Reason) { let mut me = self.inner.lock().unwrap(); - me.send_reset(&self.send_buffer, id, reason) + me.send_reset(self.send_buffer, id, reason) } pub fn send_go_away(&mut self, last_processed_id: StreamId) { @@ -725,7 +725,7 @@ impl Inner { } None => { proto_err!(conn: "recv_push_promise: initiating stream is in an invalid state"); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into()); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } }; @@ -1146,7 +1146,7 @@ impl StreamRef { let mut child_stream = me.store.resolve(child_key); child_stream.unlink(); child_stream.remove(); - return Err(err.into()); + return Err(err); } me.refs += 1; @@ -1390,7 +1390,7 @@ impl Clone for OpaqueStreamRef { OpaqueStreamRef { inner: self.inner.clone(), - key: self.key.clone(), + key: self.key, } } } diff --git a/src/server.rs b/src/server.rs index 6e216a40a..e4098e080 100644 --- a/src/server.rs +++ b/src/server.rs @@ -413,7 +413,7 @@ where ) -> Poll, SendResponse), crate::Error>>> { // Always try to advance the internal state. Getting Pending also is // needed to allow this function to return Pending. - if let Poll::Ready(_) = self.poll_closed(cx)? { + if self.poll_closed(cx)?.is_ready() { // If the socket is closed, don't return anything // TODO: drop any pending streams return Poll::Ready(None); diff --git a/src/share.rs b/src/share.rs index f4e3cdeb0..26b428797 100644 --- a/src/share.rs +++ b/src/share.rs @@ -556,8 +556,8 @@ impl PingPong { pub fn send_ping(&mut self, ping: Ping) -> Result<(), crate::Error> { // Passing a `Ping` here is just to be forwards-compatible with // eventually allowing choosing a ping payload. For now, we can - // just drop it. - drop(ping); + // just ignore it. 
+ let _ = ping; self.inner.send_ping().map_err(|err| match err { Some(err) => err.into(), diff --git a/tests/h2-support/src/frames.rs b/tests/h2-support/src/frames.rs index f2c07bacb..862e0c629 100644 --- a/tests/h2-support/src/frames.rs +++ b/tests/h2-support/src/frames.rs @@ -9,8 +9,8 @@ use h2::{ frame::{self, Frame, StreamId}, }; -pub const SETTINGS: &'static [u8] = &[0, 0, 0, 4, 0, 0, 0, 0, 0]; -pub const SETTINGS_ACK: &'static [u8] = &[0, 0, 0, 4, 1, 0, 0, 0, 0]; +pub const SETTINGS: &[u8] = &[0, 0, 0, 4, 0, 0, 0, 0, 0]; +pub const SETTINGS_ACK: &[u8] = &[0, 0, 0, 4, 1, 0, 0, 0, 0]; // ==== helper functions to easily construct h2 Frames ==== diff --git a/tests/h2-support/src/mock.rs b/tests/h2-support/src/mock.rs index cc314cd06..18d084841 100644 --- a/tests/h2-support/src/mock.rs +++ b/tests/h2-support/src/mock.rs @@ -56,7 +56,7 @@ struct Inner { closed: bool, } -const PREFACE: &'static [u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; +const PREFACE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; /// Create a new mock and handle pub fn new() -> (Mock, Handle) { @@ -148,7 +148,7 @@ impl Handle { poll_fn(move |cx| { while buf.has_remaining() { let res = Pin::new(self.codec.get_mut()) - .poll_write(cx, &mut buf.chunk()) + .poll_write(cx, buf.chunk()) .map_err(|e| panic!("write err={:?}", e)); let n = ready!(res).unwrap(); diff --git a/tests/h2-support/src/prelude.rs b/tests/h2-support/src/prelude.rs index d34f1b996..c40a518da 100644 --- a/tests/h2-support/src/prelude.rs +++ b/tests/h2-support/src/prelude.rs @@ -103,7 +103,7 @@ where // Connection is done... b.await } - Right((v, _)) => return v, + Right((v, _)) => v, Left((Err(e), _)) => panic!("err: {:?}", e), } }) diff --git a/tests/h2-support/src/util.rs b/tests/h2-support/src/util.rs index 1150d5925..aa7fb2c54 100644 --- a/tests/h2-support/src/util.rs +++ b/tests/h2-support/src/util.rs @@ -36,7 +36,7 @@ pub async fn yield_once() { pub fn wait_for_capacity(stream: h2::SendStream, target: usize) -> WaitForCapacity { WaitForCapacity { stream: Some(stream), - target: target, + target, } } @@ -66,7 +66,7 @@ impl Future for WaitForCapacity { assert_ne!(act, 0); if act >= self.target { - return Poll::Ready(self.stream.take().unwrap().into()); + return Poll::Ready(self.stream.take().unwrap()); } } } diff --git a/tests/h2-tests/tests/client_request.rs b/tests/h2-tests/tests/client_request.rs index 9635bcc6c..07b291f42 100644 --- a/tests/h2-tests/tests/client_request.rs +++ b/tests/h2-tests/tests/client_request.rs @@ -371,7 +371,7 @@ async fn send_request_poll_ready_when_connection_error() { resp2.await.expect_err("req2"); })); - while let Some(_) = unordered.next().await {} + while unordered.next().await.is_some() {} }; join(srv, h2).await; @@ -489,9 +489,8 @@ async fn http_2_request_without_scheme_or_authority() { client .send_request(request, true) .expect_err("should be UserError"); - let ret = h2.await.expect("h2"); + let _: () = h2.await.expect("h2"); drop(client); - ret }; join(srv, h2).await; @@ -1452,8 +1451,8 @@ async fn extended_connect_request() { join(srv, h2).await; } -const SETTINGS: &'static [u8] = &[0, 0, 0, 4, 0, 0, 0, 0, 0]; -const SETTINGS_ACK: &'static [u8] = &[0, 0, 0, 4, 1, 0, 0, 0, 0]; +const SETTINGS: &[u8] = &[0, 0, 0, 4, 0, 0, 0, 0, 0]; +const SETTINGS_ACK: &[u8] = &[0, 0, 0, 4, 1, 0, 0, 0, 0]; trait MockH2 { fn handshake(&mut self) -> &mut Self; diff --git a/tests/h2-tests/tests/hammer.rs b/tests/h2-tests/tests/hammer.rs index 9a200537a..a5cba3dfa 100644 --- a/tests/h2-tests/tests/hammer.rs +++ 
b/tests/h2-tests/tests/hammer.rs @@ -58,7 +58,7 @@ impl Server { } fn addr(&self) -> SocketAddr { - self.addr.clone() + self.addr } fn request_count(&self) -> usize { diff --git a/tests/h2-tests/tests/ping_pong.rs b/tests/h2-tests/tests/ping_pong.rs index a57f35c17..0f93578cc 100644 --- a/tests/h2-tests/tests/ping_pong.rs +++ b/tests/h2-tests/tests/ping_pong.rs @@ -11,9 +11,8 @@ async fn recv_single_ping() { // Create the handshake let h2 = async move { - let (client, conn) = client::handshake(m).await.unwrap(); - let c = conn.await.unwrap(); - (client, c) + let (_client, conn) = client::handshake(m).await.unwrap(); + let _: () = conn.await.unwrap(); }; let mock = async move { @@ -146,6 +145,7 @@ async fn user_notifies_when_connection_closes() { srv }; + #[allow(clippy::async_yields_async)] let client = async move { let (_client, mut conn) = client::handshake(io).await.expect("client handshake"); // yield once so we can ack server settings diff --git a/tests/h2-tests/tests/push_promise.rs b/tests/h2-tests/tests/push_promise.rs index f52f781d5..94c1154ef 100644 --- a/tests/h2-tests/tests/push_promise.rs +++ b/tests/h2-tests/tests/push_promise.rs @@ -223,7 +223,7 @@ async fn pending_push_promises_reset_when_dropped() { assert_eq!(resp.status(), StatusCode::OK); }; - let _ = conn.drive(req).await; + conn.drive(req).await; conn.await.expect("client"); drop(client); }; diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index 948ad1630..cc573f903 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -5,8 +5,8 @@ use futures::StreamExt; use h2_support::prelude::*; use tokio::io::AsyncWriteExt; -const SETTINGS: &'static [u8] = &[0, 0, 0, 4, 0, 0, 0, 0, 0]; -const SETTINGS_ACK: &'static [u8] = &[0, 0, 0, 4, 1, 0, 0, 0, 0]; +const SETTINGS: &[u8] = &[0, 0, 0, 4, 0, 0, 0, 0, 0]; +const SETTINGS_ACK: &[u8] = &[0, 0, 0, 4, 1, 0, 0, 0, 0]; #[tokio::test] async fn read_preface_in_multiple_frames() { diff --git a/tests/h2-tests/tests/stream_states.rs b/tests/h2-tests/tests/stream_states.rs index f2b2efc1e..9f348d5f2 100644 --- a/tests/h2-tests/tests/stream_states.rs +++ b/tests/h2-tests/tests/stream_states.rs @@ -786,7 +786,7 @@ async fn rst_while_closing() { // Enqueue trailers frame. 
let _ = stream.send_trailers(HeaderMap::new()); // Signal the server mock to send RST_FRAME - let _ = tx.send(()).unwrap(); + let _: () = tx.send(()).unwrap(); drop(stream); yield_once().await; // yield once to allow the server mock to be polled diff --git a/util/genfixture/src/main.rs b/util/genfixture/src/main.rs index a6d730761..9dc7b00f9 100644 --- a/util/genfixture/src/main.rs +++ b/util/genfixture/src/main.rs @@ -10,7 +10,7 @@ fn main() { let path = args.get(1).expect("usage: genfixture [PATH]"); let path = Path::new(path); - let mut tests = HashMap::new(); + let mut tests: HashMap> = HashMap::new(); for entry in WalkDir::new(path) { let entry = entry.unwrap(); @@ -28,21 +28,21 @@ fn main() { let fixture_path = path.split("fixtures/hpack/").last().unwrap(); // Now, split that into the group and the name - let module = fixture_path.split("/").next().unwrap(); + let module = fixture_path.split('/').next().unwrap(); tests .entry(module.to_string()) - .or_insert(vec![]) + .or_default() .push(fixture_path.to_string()); } let mut one = false; for (module, tests) in tests { - let module = module.replace("-", "_"); + let module = module.replace('-', "_"); if one { - println!(""); + println!(); } one = true; @@ -51,7 +51,7 @@ fn main() { println!(" {} => {{", module); for test in tests { - let ident = test.split("/").nth(1).unwrap().split(".").next().unwrap(); + let ident = test.split('/').nth(1).unwrap().split('.').next().unwrap(); println!(" ({}, {:?});", ident, test); } diff --git a/util/genhuff/src/main.rs b/util/genhuff/src/main.rs index 2d5b0ba75..6418fab8b 100644 --- a/util/genhuff/src/main.rs +++ b/util/genhuff/src/main.rs @@ -112,8 +112,8 @@ impl Node { }; start.transitions.borrow_mut().push(Transition { - target: target, - byte: byte, + target, + byte, maybe_eos: self.maybe_eos, }); @@ -238,7 +238,7 @@ pub fn main() { let (encode, decode) = load_table(); println!("// !!! DO NOT EDIT !!! Generated by util/genhuff/src/main.rs"); - println!(""); + println!(); println!("// (num-bits, bits)"); println!("pub const ENCODE_TABLE: [(usize, u64); 257] = ["); @@ -247,7 +247,7 @@ pub fn main() { } println!("];"); - println!(""); + println!(); println!("// (next-state, byte, flags)"); println!("pub const DECODE_TABLE: [[(usize, u8, u8); 16]; 256] = ["); @@ -256,7 +256,7 @@ pub fn main() { println!("];"); } -const TABLE: &'static str = r##" +const TABLE: &str = r##" ( 0) |11111111|11000 1ff8 [13] ( 1) |11111111|11111111|1011000 7fffd8 [23] ( 2) |11111111|11111111|11111110|0010 fffffe2 [28] From 68e44034b57607c964a257f4a2d77ecae38cf87e Mon Sep 17 00:00:00 2001 From: John Howard Date: Tue, 10 Jan 2023 11:29:05 -0800 Subject: [PATCH 05/56] Consistently indicate `malformed` in logs (#658) --- src/server.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/server.rs b/src/server.rs index e4098e080..2de95374b 100644 --- a/src/server.rs +++ b/src/server.rs @@ -1478,7 +1478,7 @@ impl proto::Peer for Peer { // A :scheme is required, except CONNECT. 
if let Some(scheme) = pseudo.scheme { if is_connect && !has_protocol { - malformed!(":scheme in CONNECT"); + malformed!("malformed headers: :scheme in CONNECT"); } let maybe_scheme = scheme.parse(); let scheme = maybe_scheme.or_else(|why| { @@ -1501,7 +1501,7 @@ impl proto::Peer for Peer { if let Some(path) = pseudo.path { if is_connect && !has_protocol { - malformed!(":path in CONNECT"); + malformed!("malformed headers: :path in CONNECT"); } // This cannot be empty From b84c2442ba9ec3728d11fbb7bd018e83e977dd55 Mon Sep 17 00:00:00 2001 From: Folke B Date: Fri, 20 Jan 2023 16:44:22 +0100 Subject: [PATCH 06/56] Add Protocol extension to Request on Extended CONNECT (#655) This exposes the :protocol pseudo header as Request extension. Fixes #347 --- src/server.rs | 9 +++++++-- tests/h2-tests/tests/server.rs | 7 ++++++- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/src/server.rs b/src/server.rs index 2de95374b..bb3d3cf86 100644 --- a/src/server.rs +++ b/src/server.rs @@ -1451,8 +1451,13 @@ impl proto::Peer for Peer { } let has_protocol = pseudo.protocol.is_some(); - if !is_connect && has_protocol { - malformed!("malformed headers: :protocol on non-CONNECT request"); + if has_protocol { + if is_connect { + // Assert that we have the right type. + b = b.extension::(pseudo.protocol.unwrap()); + } else { + malformed!("malformed headers: :protocol on non-CONNECT request"); + } } if pseudo.status.is_some() { diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index cc573f903..8aea1fd59 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -1214,7 +1214,12 @@ async fn extended_connect_protocol_enabled_during_handshake() { let mut srv = builder.handshake::<_, Bytes>(io).await.expect("handshake"); - let (_req, mut stream) = srv.next().await.unwrap().unwrap(); + let (req, mut stream) = srv.next().await.unwrap().unwrap(); + + assert_eq!( + req.extensions().get::(), + Some(&crate::ext::Protocol::from_static("the-bread-protocol")) + ); let rsp = Response::new(()); stream.send_response(rsp, false).unwrap(); From 73bea23e9b6967cc9699918b8965e0fd87e8ae53 Mon Sep 17 00:00:00 2001 From: Chekov Date: Tue, 14 Feb 2023 07:39:40 +0800 Subject: [PATCH 07/56] fix: panic in when there is connection window available, but not stream (#657) We met the panic in our production environment, so handle this panic condition before panic. The stack backtrace: Co-authored-by: winters.zc --- src/proto/streams/flow_control.rs | 15 +++++++++------ src/proto/streams/prioritize.rs | 8 ++++++++ 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/src/proto/streams/flow_control.rs b/src/proto/streams/flow_control.rs index b1b2745eb..73a7754db 100644 --- a/src/proto/streams/flow_control.rs +++ b/src/proto/streams/flow_control.rs @@ -173,12 +173,15 @@ impl FlowControl { self.available ); - // Ensure that the argument is correct - assert!(self.window_size >= sz as usize); - - // Update values - self.window_size -= sz; - self.available -= sz; + // If send size is zero it's meaningless to update flow control window + if sz > 0 { + // Ensure that the argument is correct + assert!(self.window_size >= sz as usize); + + // Update values + self.window_size -= sz; + self.available -= sz; + } } } diff --git a/src/proto/streams/prioritize.rs b/src/proto/streams/prioritize.rs index 329e55022..f89a772f0 100644 --- a/src/proto/streams/prioritize.rs +++ b/src/proto/streams/prioritize.rs @@ -744,6 +744,14 @@ impl Prioritize { // capacity at this point. 
debug_assert!(len <= self.flow.window_size()); + // Check if the stream level window the peer knows is available. In some + // scenarios, maybe the window we know is available but the window which + // peer knows is not. + if len > 0 && len > stream.send_flow.window_size() { + stream.pending_send.push_front(buffer, frame.into()); + continue; + } + tracing::trace!(len, "sending data frame"); // Update the flow control From 732319039ff6184076b339f6870746b6c7fed86f Mon Sep 17 00:00:00 2001 From: Vadim Egorov <96079228+vadim-eg@users.noreply.github.com> Date: Mon, 20 Feb 2023 14:55:05 -0800 Subject: [PATCH 08/56] Avoid spurious wakeups when stream capacity is not available (#661) Fixes #628 Sometimes `poll_capacity` returns `Ready(Some(0))` - in which case caller will have no way to wait for the stream capacity to become available. The previous attempt on the fix has addressed only a part of the problem. The root cause - in a nutshell - is the race condition between the application tasks that performs stream I/O and the task that serves the underlying HTTP/2 connection. The application thread that is about to send data calls `reserve_capacity/poll_capacity`, is provided with some send capacity and proceeds to `send_data`. Meanwhile the service thread may send some buffered data and/or receive some window updates - either way the stream's effective allocated send capacity may not change, but, since the capacity still available, `send_capacity_inc` flag may be set. The sending task calls `send_data` and uses the entire allocated capacity, leaving the flag set. Next time `poll_capacity` returns `Ready(Some(0))`. This change sets the flag and dispatches the wakeup event only in cases when the effective capacity reported by `poll_capacity` actually increases. --- src/proto/streams/prioritize.rs | 20 +++------ src/proto/streams/send.rs | 7 +--- src/proto/streams/stream.rs | 56 +++++++++++++++++++------ tests/h2-tests/tests/flow_control.rs | 61 ++++++++++++++++++++++++++++ 4 files changed, 111 insertions(+), 33 deletions(-) diff --git a/src/proto/streams/prioritize.rs b/src/proto/streams/prioritize.rs index f89a772f0..88204ddcc 100644 --- a/src/proto/streams/prioritize.rs +++ b/src/proto/streams/prioritize.rs @@ -323,9 +323,11 @@ impl Prioritize { /// connection pub fn reclaim_all_capacity(&mut self, stream: &mut store::Ptr, counts: &mut Counts) { let available = stream.send_flow.available().as_size(); - stream.send_flow.claim_capacity(available); - // Re-assign all capacity to the connection - self.assign_connection_capacity(available, stream, counts); + if available > 0 { + stream.send_flow.claim_capacity(available); + // Re-assign all capacity to the connection + self.assign_connection_capacity(available, stream, counts); + } } /// Reclaim just reserved capacity, not buffered capacity, and re-assign @@ -756,17 +758,7 @@ impl Prioritize { // Update the flow control tracing::trace_span!("updating stream flow").in_scope(|| { - stream.send_flow.send_data(len); - - // Decrement the stream's buffered data counter - debug_assert!(stream.buffered_send_data >= len as usize); - stream.buffered_send_data -= len as usize; - stream.requested_send_capacity -= len; - - // If the capacity was limited because of the - // max_send_buffer_size, then consider waking - // the send task again... 
- stream.notify_if_can_buffer_more(self.max_buffer_size); + stream.send_data(len, self.max_buffer_size); // Assign the capacity back to the connection that // was just consumed from the stream in the previous diff --git a/src/proto/streams/send.rs b/src/proto/streams/send.rs index 38896a304..20aba38d4 100644 --- a/src/proto/streams/send.rs +++ b/src/proto/streams/send.rs @@ -333,12 +333,7 @@ impl Send { /// Current available stream send capacity pub fn capacity(&self, stream: &mut store::Ptr) -> WindowSize { - let available = stream.send_flow.available().as_size() as usize; - let buffered = stream.buffered_send_data; - - available - .min(self.prioritize.max_buffer_size()) - .saturating_sub(buffered) as WindowSize + stream.capacity(self.prioritize.max_buffer_size()) } pub fn poll_reset( diff --git a/src/proto/streams/stream.rs b/src/proto/streams/stream.rs index 68a29828c..2888d744b 100644 --- a/src/proto/streams/stream.rs +++ b/src/proto/streams/stream.rs @@ -264,35 +264,65 @@ impl Stream { self.ref_count == 0 && !self.state.is_closed() } + /// Current available stream send capacity + pub fn capacity(&self, max_buffer_size: usize) -> WindowSize { + let available = self.send_flow.available().as_size() as usize; + let buffered = self.buffered_send_data; + + available.min(max_buffer_size).saturating_sub(buffered) as WindowSize + } + pub fn assign_capacity(&mut self, capacity: WindowSize, max_buffer_size: usize) { + let prev_capacity = self.capacity(max_buffer_size); debug_assert!(capacity > 0); self.send_flow.assign_capacity(capacity); tracing::trace!( - " assigned capacity to stream; available={}; buffered={}; id={:?}; max_buffer_size={}", + " assigned capacity to stream; available={}; buffered={}; id={:?}; max_buffer_size={} prev={}", self.send_flow.available(), self.buffered_send_data, self.id, - max_buffer_size + max_buffer_size, + prev_capacity, ); - self.notify_if_can_buffer_more(max_buffer_size); + if prev_capacity < self.capacity(max_buffer_size) { + self.notify_capacity(); + } } - /// If the capacity was limited because of the max_send_buffer_size, - /// then consider waking the send task again... - pub fn notify_if_can_buffer_more(&mut self, max_buffer_size: usize) { - let available = self.send_flow.available().as_size() as usize; - let buffered = self.buffered_send_data; + pub fn send_data(&mut self, len: WindowSize, max_buffer_size: usize) { + let prev_capacity = self.capacity(max_buffer_size); + + self.send_flow.send_data(len); - // Only notify if the capacity exceeds the amount of buffered data - if available.min(max_buffer_size) > buffered { - self.send_capacity_inc = true; - tracing::trace!(" notifying task"); - self.notify_send(); + // Decrement the stream's buffered data counter + debug_assert!(self.buffered_send_data >= len as usize); + self.buffered_send_data -= len as usize; + self.requested_send_capacity -= len; + + tracing::trace!( + " sent stream data; available={}; buffered={}; id={:?}; max_buffer_size={} prev={}", + self.send_flow.available(), + self.buffered_send_data, + self.id, + max_buffer_size, + prev_capacity, + ); + + if prev_capacity < self.capacity(max_buffer_size) { + self.notify_capacity(); } } + /// If the capacity was limited because of the max_send_buffer_size, + /// then consider waking the send task again... + pub fn notify_capacity(&mut self) { + self.send_capacity_inc = true; + tracing::trace!(" notifying task"); + self.notify_send(); + } + /// Returns `Err` when the decrement cannot be completed due to overflow. 
pub fn dec_content_length(&mut self, len: usize) -> Result<(), ()> { match self.content_length { diff --git a/tests/h2-tests/tests/flow_control.rs b/tests/h2-tests/tests/flow_control.rs index 92e7a532f..5caa2ec3a 100644 --- a/tests/h2-tests/tests/flow_control.rs +++ b/tests/h2-tests/tests/flow_control.rs @@ -1797,3 +1797,64 @@ async fn max_send_buffer_size_poll_capacity_wakes_task() { join(srv, client).await; } + +#[tokio::test] +async fn poll_capacity_wakeup_after_window_update() { + h2_support::trace_init!(); + let (io, mut srv) = mock::new(); + + let srv = async move { + let settings = srv + .assert_client_handshake_with_settings(frames::settings().initial_window_size(10)) + .await; + assert_default_settings!(settings); + srv.recv_frame(frames::headers(1).request("POST", "https://www.example.com/")) + .await; + srv.send_frame(frames::headers(1).response(200)).await; + srv.recv_frame(frames::data(1, &b"abcde"[..])).await; + srv.send_frame(frames::window_update(1, 5)).await; + srv.send_frame(frames::window_update(1, 5)).await; + srv.recv_frame(frames::data(1, &b"abcde"[..])).await; + srv.recv_frame(frames::data(1, &b""[..]).eos()).await; + }; + + let h2 = async move { + let (mut client, mut h2) = client::Builder::new() + .max_send_buffer_size(5) + .handshake::<_, Bytes>(io) + .await + .unwrap(); + let request = Request::builder() + .method(Method::POST) + .uri("https://www.example.com/") + .body(()) + .unwrap(); + + let (response, mut stream) = client.send_request(request, false).unwrap(); + + let response = h2.drive(response).await.unwrap(); + assert_eq!(response.status(), StatusCode::OK); + + stream.send_data("abcde".into(), false).unwrap(); + + stream.reserve_capacity(10); + assert_eq!(stream.capacity(), 0); + + let mut stream = h2.drive(util::wait_for_capacity(stream, 5)).await; + h2.drive(idle_ms(10)).await; + stream.send_data("abcde".into(), false).unwrap(); + + stream.reserve_capacity(5); + assert_eq!(stream.capacity(), 0); + + // This will panic if there is a bug causing h2 to return Ok(0) from poll_capacity. + let mut stream = h2.drive(util::wait_for_capacity(stream, 5)).await; + + stream.send_data("".into(), true).unwrap(); + + // Wait for the connection to close + h2.await.unwrap(); + }; + + join(srv, h2).await; +} From 96caf4fca32fad92037e082b6b74220274faaf16 Mon Sep 17 00:00:00 2001 From: Anthony Ramine <123095+nox@users.noreply.github.com> Date: Wed, 22 Feb 2023 17:09:20 +0100 Subject: [PATCH 09/56] Add a message for EOF-related broken pipe errors (#615) It's quite confusing from production logs when all I get is "broken pipe" and I don't know which path was taken for that error to be logged. --- src/proto/streams/state.rs | 8 +++++++- src/proto/streams/streams.rs | 8 +++++++- tests/h2-tests/tests/client_request.rs | 9 ++++++--- 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/src/proto/streams/state.rs b/src/proto/streams/state.rs index 1233e2352..db37831f8 100644 --- a/src/proto/streams/state.rs +++ b/src/proto/streams/state.rs @@ -303,7 +303,13 @@ impl State { Closed(..) 
=> {} ref state => { tracing::trace!("recv_eof; state={:?}", state); - self.inner = Closed(Cause::Error(io::ErrorKind::BrokenPipe.into())); + self.inner = Closed(Cause::Error( + io::Error::new( + io::ErrorKind::BrokenPipe, + "stream closed because of a broken pipe", + ) + .into(), + )); } } } diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index 01bdcdd70..e1a2e3fe7 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -806,7 +806,13 @@ impl Inner { let send_buffer = &mut *send_buffer; if actions.conn_error.is_none() { - actions.conn_error = Some(io::Error::from(io::ErrorKind::BrokenPipe).into()); + actions.conn_error = Some( + io::Error::new( + io::ErrorKind::BrokenPipe, + "connection closed because of a broken pipe", + ) + .into(), + ); } tracing::trace!("Streams::recv_eof"); diff --git a/tests/h2-tests/tests/client_request.rs b/tests/h2-tests/tests/client_request.rs index 07b291f42..aff39f5c1 100644 --- a/tests/h2-tests/tests/client_request.rs +++ b/tests/h2-tests/tests/client_request.rs @@ -574,7 +574,7 @@ async fn connection_close_notifies_response_future() { .0 .await; let err = res.expect_err("response"); - assert_eq!(err.to_string(), "broken pipe"); + assert_eq!(err.to_string(), "stream closed because of a broken pipe"); }; join(async move { conn.await.expect("conn") }, req).await; }; @@ -613,7 +613,7 @@ async fn connection_close_notifies_client_poll_ready() { .0 .await; let err = res.expect_err("response"); - assert_eq!(err.to_string(), "broken pipe"); + assert_eq!(err.to_string(), "stream closed because of a broken pipe"); }; conn.drive(req).await; @@ -621,7 +621,10 @@ async fn connection_close_notifies_client_poll_ready() { let err = poll_fn(move |cx| client.poll_ready(cx)) .await .expect_err("poll_ready"); - assert_eq!(err.to_string(), "broken pipe"); + assert_eq!( + err.to_string(), + "connection closed because of a broken pipe" + ); }; join(srv, client).await; From b9dcd39915420ab1d9f4a8ad0f96c86af8f86558 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Mon, 27 Feb 2023 12:03:15 -0500 Subject: [PATCH 10/56] v0.3.16 --- CHANGELOG.md | 8 ++++++++ Cargo.toml | 2 +- src/lib.rs | 2 +- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 09c99aac3..17abf81db 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,11 @@ +# 0.3.16 (February 27, 2023) + +* Set `Protocol` extension on requests when received Extended CONNECT requests. +* Remove `B: Unpin + 'static` bound requiremented of bufs +* Fix releasing of frames when stream is finished, reducing memory usage. +* Fix panic when trying to send data and connection window is available, but stream window is not. +* Fix spurious wakeups when stream capacity is not available. + # 0.3.15 (October 21, 2022) * Remove `B: Buf` bound on `SendStream`'s parameter diff --git a/Cargo.toml b/Cargo.toml index 8e904875e..be87f0303 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.15" +version = "0.3.16" license = "MIT" authors = [ "Carl Lerche ", diff --git a/src/lib.rs b/src/lib.rs index e7b95035f..3af8b1a32 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! 
[`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.15")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.16")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] #![allow(clippy::type_complexity, clippy::manual_range_contains)] From 45b9bccdfcb26cfe9907123a1e975a22eb84c44f Mon Sep 17 00:00:00 2001 From: Alex Touchet Date: Tue, 28 Feb 2023 08:15:12 -0800 Subject: [PATCH 11/56] chore: set rust-version in Cargo.toml (#664) --- Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/Cargo.toml b/Cargo.toml index be87f0303..64573e1f4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,6 +19,7 @@ keywords = ["http", "async", "non-blocking"] categories = ["asynchronous", "web-programming", "network-programming"] exclude = ["fixtures/**", "ci/**"] edition = "2018" +rust-version = "1.56" [features] # Enables `futures::Stream` implementations for various types. From d3d50ef8123f0a1b6d16faa2d9edc01418da7c00 Mon Sep 17 00:00:00 2001 From: Constantin Nickel Date: Wed, 12 Apr 2023 14:30:32 +0200 Subject: [PATCH 12/56] chore: Replace unmaintained/outdated GitHub Actions --- .github/workflows/CI.yml | 48 ++++++++++------------------------------ 1 file changed, 12 insertions(+), 36 deletions(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index f433f9082..1ae9a54b3 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -13,21 +13,14 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v1 + uses: actions/checkout@v3 - name: Install Rust - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@stable with: - profile: minimal - toolchain: stable - override: true components: rustfmt - - name: cargo fmt --check - uses: actions-rs/cargo@v1 - with: - command: fmt - args: --all -- --check + - run: cargo fmt --all --check test: name: Test @@ -43,38 +36,26 @@ jobs: - stable steps: - name: Checkout - uses: actions/checkout@v1 + uses: actions/checkout@v3 - name: Install Rust (${{ matrix.rust }}) - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@master with: - profile: minimal toolchain: ${{ matrix.rust }} - override: true - name: Install libssl-dev run: sudo apt-get update && sudo apt-get install libssl-dev - name: Build without unstable flag - uses: actions-rs/cargo@v1 - with: - command: build + run: cargo build - name: Check with unstable flag - uses: actions-rs/cargo@v1 - with: - command: check - args: --features unstable + run: cargo check --features unstable - name: Run lib tests and doc tests - uses: actions-rs/cargo@v1 - with: - command: test + run: cargo test - name: Run integration tests - uses: actions-rs/cargo@v1 - with: - command: test - args: -p h2-tests + run: cargo test -p h2-tests - name: Run h2spec run: ./ci/h2spec.sh @@ -99,16 +80,11 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v1 + uses: actions/checkout@v3 - name: Install Rust (${{ matrix.rust }}) - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@master with: - profile: minimal toolchain: ${{ matrix.rust }} - override: true - - name: Check - uses: actions-rs/cargo@v1 - with: - command: check + - run: cargo check From 481c31d5283bf32b90c83388c037494548934971 Mon Sep 17 00:00:00 2001 From: Constantin Nickel Date: Wed, 12 Apr 2023 14:40:20 +0200 Subject: [PATCH 13/56] chore: Use Cargo metadata for the MSRV build job --- .github/workflows/CI.yml | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/.github/workflows/CI.yml 
b/.github/workflows/CI.yml index 1ae9a54b3..2cff15cff 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -66,25 +66,24 @@ jobs: if: matrix.rust == 'nightly' msrv: - name: Check MSRV (${{ matrix.rust }}) + name: Check MSRV needs: [style] - strategy: - matrix: - rust: - - 1.56 # never go past Hyper's own MSRV - - os: - - ubuntu-latest - runs-on: ${{ matrix.os }} + runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v3 - - name: Install Rust (${{ matrix.rust }}) + - name: Get MSRV from package metadata + id: metadata + run: | + cargo metadata --no-deps --format-version 1 | + jq -r '"msrv=" + (.packages[] | select(.name == "h2")).rust_version' >> $GITHUB_OUTPUT + + - name: Install Rust (${{ steps.metadata.outputs.msrv }}) uses: dtolnay/rust-toolchain@master with: - toolchain: ${{ matrix.rust }} + toolchain: ${{ steps.metadata.outputs.msrv }} - run: cargo check From 8088ca658d90a3874fb6b136b85776424265295b Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 13 Apr 2023 09:11:55 -0400 Subject: [PATCH 14/56] feat: add Error::is_library method --- src/error.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/error.rs b/src/error.rs index d45827e36..1b1438e48 100644 --- a/src/error.rs +++ b/src/error.rs @@ -103,6 +103,16 @@ impl Error { Kind::GoAway(_, _, Initiator::Remote) | Kind::Reset(_, _, Initiator::Remote) ) } + + /// Returns true if the error was created by `h2. + /// + /// Such as noticing some protocol error and sending a GOAWAY or RST_STREAM. + pub fn is_library(&self) -> bool { + matches!( + self.kind, + Kind::GoAway(_, _, Initiator::Library) | Kind::Reset(_, _, Initiator::Library) + ) + } } impl From for Error { From 5bc8e72e5fcbd8ae2d3d9bc78a1c0ef0040bcc39 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 12 Apr 2023 12:23:56 -0400 Subject: [PATCH 15/56] fix: limit the amount of pending-accept reset streams Streams that have been received by the peer, but not accepted by the user, can also receive a RST_STREAM. This is a legitimate pattern: one could send a request and then shortly after, realize it is not needed, sending a CANCEL. However, since those streams are now "closed", they don't count towards the max concurrent streams. So, they will sit in the accept queue, using memory. In most cases, the user is calling `accept` in a loop, and they can accept requests that have been reset fast enough that this isn't an issue in practice. But if the peer is able to flood the network faster than the server accept loop can run (simply accepting, not processing requests; that tends to happen in a separate task), the memory could grow. So, this introduces a maximum count for streams in the pending-accept but remotely-reset state. If the maximum is reached, a GOAWAY frame with the error code of ENHANCE_YOUR_CALM is sent, and the connection marks itself as errored. 
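For context, the accept loop referred to above typically looks like the sketch
below. This is illustrative only and is not part of the change: it uses the
public `h2::server` API, and `handle_request` is a placeholder for application
logic.

    use bytes::Bytes;
    use h2::server::{self, SendResponse};
    use h2::RecvStream;
    use http::Request;
    use tokio::net::TcpStream;

    async fn serve(socket: TcpStream) -> Result<(), h2::Error> {
        let mut connection = server::handshake(socket).await?;
        // Each iteration pulls one stream out of the pending-accept queue.
        while let Some(result) = connection.accept().await {
            let (request, respond) = result?;
            // Off-loading the work keeps the accept loop itself fast, which is
            // what normally keeps the pending-accept queue small.
            tokio::spawn(async move {
                if let Err(e) = handle_request(request, respond).await {
                    eprintln!("request failed: {}", e);
                }
            });
        }
        Ok(())
    }

    // Placeholder application logic, not part of this change.
    async fn handle_request(
        _request: Request<RecvStream>,
        mut respond: SendResponse<Bytes>,
    ) -> Result<(), h2::Error> {
        respond.send_response(http::Response::new(()), true)?;
        Ok(())
    }

When that loop cannot keep up with the peer, the pending-accept queue is where
remotely-reset streams accumulate, and the new limit is what bounds it.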
ref CVE-2023-26964 ref GHSA-f8vr-r385-rh5r Closes https://github.com/hyperium/hyper/issues/2877 --- src/proto/connection.rs | 8 +++++ src/proto/streams/counts.rs | 51 ++++++++++++++++++++++----- src/proto/streams/mod.rs | 4 +++ src/proto/streams/recv.rs | 30 ++++++++++++++-- src/proto/streams/state.rs | 7 ++++ src/proto/streams/streams.rs | 8 ++++- src/server.rs | 7 ++++ tests/h2-support/src/frames.rs | 4 +++ tests/h2-tests/tests/stream_states.rs | 41 ++++++++++++++++++++- 9 files changed, 148 insertions(+), 12 deletions(-) diff --git a/src/proto/connection.rs b/src/proto/connection.rs index 59883cf33..1fec23102 100644 --- a/src/proto/connection.rs +++ b/src/proto/connection.rs @@ -14,6 +14,8 @@ use std::task::{Context, Poll}; use std::time::Duration; use tokio::io::{AsyncRead, AsyncWrite}; +const DEFAULT_MAX_REMOTE_RESET_STREAMS: usize = 20; + /// An H2 connection #[derive(Debug)] pub(crate) struct Connection @@ -118,6 +120,7 @@ where .unwrap_or(false), local_reset_duration: config.reset_stream_duration, local_reset_max: config.reset_stream_max, + remote_reset_max: DEFAULT_MAX_REMOTE_RESET_STREAMS, remote_init_window_sz: DEFAULT_INITIAL_WINDOW_SIZE, remote_max_initiated: config .settings @@ -172,6 +175,11 @@ where self.inner.streams.max_recv_streams() } + #[cfg(feature = "unstable")] + pub fn num_wired_streams(&self) -> usize { + self.inner.streams.num_wired_streams() + } + /// Returns `Ready` when the connection is ready to receive a frame. /// /// Returns `Error` as this may raise errors that are caused by delayed diff --git a/src/proto/streams/counts.rs b/src/proto/streams/counts.rs index 70dfc7851..6a5aa9ccd 100644 --- a/src/proto/streams/counts.rs +++ b/src/proto/streams/counts.rs @@ -21,10 +21,16 @@ pub(super) struct Counts { num_recv_streams: usize, /// Maximum number of pending locally reset streams - max_reset_streams: usize, + max_local_reset_streams: usize, /// Current number of pending locally reset streams - num_reset_streams: usize, + num_local_reset_streams: usize, + + /// Max number of "pending accept" streams that were remotely reset + max_remote_reset_streams: usize, + + /// Current number of "pending accept" streams that were remotely reset + num_remote_reset_streams: usize, } impl Counts { @@ -36,8 +42,10 @@ impl Counts { num_send_streams: 0, max_recv_streams: config.remote_max_initiated.unwrap_or(usize::MAX), num_recv_streams: 0, - max_reset_streams: config.local_reset_max, - num_reset_streams: 0, + max_local_reset_streams: config.local_reset_max, + num_local_reset_streams: 0, + max_remote_reset_streams: config.remote_reset_max, + num_remote_reset_streams: 0, } } @@ -90,7 +98,7 @@ impl Counts { /// Returns true if the number of pending reset streams can be incremented. pub fn can_inc_num_reset_streams(&self) -> bool { - self.max_reset_streams > self.num_reset_streams + self.max_local_reset_streams > self.num_local_reset_streams } /// Increments the number of pending reset streams. @@ -101,7 +109,34 @@ impl Counts { pub fn inc_num_reset_streams(&mut self) { assert!(self.can_inc_num_reset_streams()); - self.num_reset_streams += 1; + self.num_local_reset_streams += 1; + } + + pub(crate) fn max_remote_reset_streams(&self) -> usize { + self.max_remote_reset_streams + } + + /// Returns true if the number of pending REMOTE reset streams can be + /// incremented. + pub(crate) fn can_inc_num_remote_reset_streams(&self) -> bool { + self.max_remote_reset_streams > self.num_remote_reset_streams + } + + /// Increments the number of pending REMOTE reset streams. 
+ /// + /// # Panics + /// + /// Panics on failure as this should have been validated before hand. + pub(crate) fn inc_num_remote_reset_streams(&mut self) { + assert!(self.can_inc_num_remote_reset_streams()); + + self.num_remote_reset_streams += 1; + } + + pub(crate) fn dec_num_remote_reset_streams(&mut self) { + assert!(self.num_remote_reset_streams > 0); + + self.num_remote_reset_streams -= 1; } pub fn apply_remote_settings(&mut self, settings: &frame::Settings) { @@ -194,8 +229,8 @@ impl Counts { } fn dec_num_reset_streams(&mut self) { - assert!(self.num_reset_streams > 0); - self.num_reset_streams -= 1; + assert!(self.num_local_reset_streams > 0); + self.num_local_reset_streams -= 1; } } diff --git a/src/proto/streams/mod.rs b/src/proto/streams/mod.rs index 0ff8131c1..fbe32c7b0 100644 --- a/src/proto/streams/mod.rs +++ b/src/proto/streams/mod.rs @@ -60,6 +60,10 @@ pub struct Config { /// Maximum number of locally reset streams to keep at a time pub local_reset_max: usize, + /// Maximum number of remotely reset "pending accept" streams to keep at a + /// time. Going over this number results in a connection error. + pub remote_reset_max: usize, + /// Initial window size of remote initiated streams pub remote_init_window_sz: WindowSize, diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index 497efc9bc..0fe2bdd57 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -741,12 +741,39 @@ impl Recv { } /// Handle remote sending an explicit RST_STREAM. - pub fn recv_reset(&mut self, frame: frame::Reset, stream: &mut Stream) { + pub fn recv_reset( + &mut self, + frame: frame::Reset, + stream: &mut Stream, + counts: &mut Counts, + ) -> Result<(), Error> { + // Reseting a stream that the user hasn't accepted is possible, + // but should be done with care. These streams will continue + // to take up memory in the accept queue, but will no longer be + // counted as "concurrent" streams. + // + // So, we have a separate limit for these. + // + // See https://github.com/hyperium/hyper/issues/2877 + if stream.is_pending_accept { + if counts.can_inc_num_remote_reset_streams() { + counts.inc_num_remote_reset_streams(); + } else { + tracing::warn!( + "recv_reset; remotely-reset pending-accept streams reached limit ({:?})", + counts.max_remote_reset_streams(), + ); + return Err(Error::library_go_away(Reason::ENHANCE_YOUR_CALM)); + } + } + // Notify the stream stream.state.recv_reset(frame, stream.is_pending_send); stream.notify_send(); stream.notify_recv(); + + Ok(()) } /// Handle a connection-level error @@ -1033,7 +1060,6 @@ impl Recv { cx: &Context, stream: &mut Stream, ) -> Poll>> { - // TODO: Return error when the stream is reset match stream.pending_recv.pop_front(&mut self.buffer) { Some(Event::Data(payload)) => Poll::Ready(Some(Ok(payload))), Some(event) => { diff --git a/src/proto/streams/state.rs b/src/proto/streams/state.rs index db37831f8..b9612addc 100644 --- a/src/proto/streams/state.rs +++ b/src/proto/streams/state.rs @@ -360,6 +360,13 @@ impl State { } } + pub fn is_remote_reset(&self) -> bool { + match self.inner { + Closed(Cause::Error(ref e)) => e.is_local(), + _ => false, + } + } + /// Returns true if the stream is already reset. 
pub fn is_reset(&self) -> bool { match self.inner { diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index e1a2e3fe7..dbaebfa7a 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -140,6 +140,12 @@ where // TODO: ideally, OpaqueStreamRefs::new would do this, but we're holding // the lock, so it can't. me.refs += 1; + + // Pending-accepted remotely-reset streams are counted. + if stream.state.is_remote_reset() { + me.counts.dec_num_remote_reset_streams(); + } + StreamRef { opaque: OpaqueStreamRef::new(self.inner.clone(), stream), send_buffer: self.send_buffer.clone(), @@ -601,7 +607,7 @@ impl Inner { let actions = &mut self.actions; self.counts.transition(stream, |counts, stream| { - actions.recv.recv_reset(frame, stream); + actions.recv.recv_reset(frame, stream, counts)?; actions.send.handle_error(send_buffer, stream, counts); assert!(stream.state.is_closed()); Ok(()) diff --git a/src/server.rs b/src/server.rs index bb3d3cf86..6f2455e0b 100644 --- a/src/server.rs +++ b/src/server.rs @@ -576,6 +576,13 @@ where pub fn max_concurrent_recv_streams(&self) -> usize { self.connection.max_recv_streams() } + + // Could disappear at anytime. + #[doc(hidden)] + #[cfg(feature = "unstable")] + pub fn num_wired_streams(&self) -> usize { + self.connection.num_wired_streams() + } } #[cfg(feature = "stream")] diff --git a/tests/h2-support/src/frames.rs b/tests/h2-support/src/frames.rs index 862e0c629..bc4e2e708 100644 --- a/tests/h2-support/src/frames.rs +++ b/tests/h2-support/src/frames.rs @@ -297,6 +297,10 @@ impl Mock { self.reason(frame::Reason::FRAME_SIZE_ERROR) } + pub fn calm(self) -> Self { + self.reason(frame::Reason::ENHANCE_YOUR_CALM) + } + pub fn no_error(self) -> Self { self.reason(frame::Reason::NO_ERROR) } diff --git a/tests/h2-tests/tests/stream_states.rs b/tests/h2-tests/tests/stream_states.rs index 9f348d5f2..610d3a530 100644 --- a/tests/h2-tests/tests/stream_states.rs +++ b/tests/h2-tests/tests/stream_states.rs @@ -1,6 +1,6 @@ #![deny(warnings)] -use futures::future::{join, join3, lazy, try_join}; +use futures::future::{join, join3, lazy, poll_fn, try_join}; use futures::{FutureExt, StreamExt, TryStreamExt}; use h2_support::prelude::*; use h2_support::util::yield_once; @@ -194,6 +194,45 @@ async fn closed_streams_are_released() { join(srv, h2).await; } +#[tokio::test] +async fn reset_streams_dont_grow_memory_continuously() { + //h2_support::trace_init!(); + let (io, mut client) = mock::new(); + + const N: u32 = 50; + + let client = async move { + let settings = client.assert_server_handshake().await; + assert_default_settings!(settings); + for n in (1..(N * 2)).step_by(2) { + client + .send_frame(frames::headers(n).request("GET", "https://a.b/").eos()) + .await; + client.send_frame(frames::reset(n).protocol_error()).await; + } + tokio::time::timeout( + std::time::Duration::from_secs(1), + client.recv_frame(frames::go_away(41).calm()), + ) + .await + .expect("client goaway"); + }; + + let srv = async move { + let mut srv = server::Builder::new() + .handshake::<_, Bytes>(io) + .await + .expect("handshake"); + + poll_fn(|cx| srv.poll_closed(cx)) + .await + .expect_err("server should error"); + // specifically, not 50; + assert_eq!(21, srv.num_wired_streams()); + }; + join(srv, client).await; +} + #[tokio::test] async fn errors_if_recv_frame_exceeds_max_frame_size() { h2_support::trace_init!(); From d3f37e9fbad6608ba74419c355eb1892bd55d977 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 12 Apr 2023 21:13:41 -0400 Subject: [PATCH 
16/56] feat: add `max_pending_accept_reset_streams(n)` options The new option is available to both client and server `Builder`s. --- src/client.rs | 49 +++++++++++++++++++++++++++ src/proto/connection.rs | 5 ++- src/proto/mod.rs | 1 + src/server.rs | 49 +++++++++++++++++++++++++++ tests/h2-tests/tests/stream_states.rs | 4 ++- 5 files changed, 104 insertions(+), 4 deletions(-) diff --git a/src/client.rs b/src/client.rs index 5dd0b0f87..4147e8a46 100644 --- a/src/client.rs +++ b/src/client.rs @@ -326,6 +326,10 @@ pub struct Builder { /// Maximum number of locally reset streams to keep at a time. reset_stream_max: usize, + /// Maximum number of remotely reset streams to allow in the pending + /// accept queue. + pending_accept_reset_stream_max: usize, + /// Initial `Settings` frame to send as part of the handshake. settings: Settings, @@ -634,6 +638,7 @@ impl Builder { max_send_buffer_size: proto::DEFAULT_MAX_SEND_BUFFER_SIZE, reset_stream_duration: Duration::from_secs(proto::DEFAULT_RESET_STREAM_SECS), reset_stream_max: proto::DEFAULT_RESET_STREAM_MAX, + pending_accept_reset_stream_max: proto::DEFAULT_REMOTE_RESET_STREAM_MAX, initial_target_connection_window_size: None, initial_max_send_streams: usize::MAX, settings: Default::default(), @@ -966,6 +971,49 @@ impl Builder { self } + /// Sets the maximum number of pending-accept remotely-reset streams. + /// + /// Streams that have been received by the peer, but not accepted by the + /// user, can also receive a RST_STREAM. This is a legitimate pattern: one + /// could send a request and then shortly after, realize it is not needed, + /// sending a CANCEL. + /// + /// However, since those streams are now "closed", they don't count towards + /// the max concurrent streams. So, they will sit in the accept queue, + /// using memory. + /// + /// When the number of remotely-reset streams sitting in the pending-accept + /// queue reaches this maximum value, a connection error with the code of + /// `ENHANCE_YOUR_CALM` will be sent to the peer, and returned by the + /// `Future`. + /// + /// The default value is currently 20, but could change. + /// + /// # Examples + /// + /// ``` + /// # use tokio::io::{AsyncRead, AsyncWrite}; + /// # use h2::client::*; + /// # use bytes::Bytes; + /// # + /// # async fn doc(my_io: T) + /// # -> Result<((SendRequest, Connection)), h2::Error> + /// # { + /// // `client_fut` is a future representing the completion of the HTTP/2 + /// // handshake. + /// let client_fut = Builder::new() + /// .max_pending_accept_reset_streams(100) + /// .handshake(my_io); + /// # client_fut.await + /// # } + /// # + /// # pub fn main() {} + /// ``` + pub fn max_pending_accept_reset_streams(&mut self, max: usize) -> &mut Self { + self.pending_accept_reset_stream_max = max; + self + } + /// Sets the maximum send buffer size per stream. 
/// /// Once a stream has buffered up to (or over) the maximum, the stream's @@ -1209,6 +1257,7 @@ where max_send_buffer_size: builder.max_send_buffer_size, reset_stream_duration: builder.reset_stream_duration, reset_stream_max: builder.reset_stream_max, + remote_reset_stream_max: builder.pending_accept_reset_stream_max, settings: builder.settings.clone(), }, ); diff --git a/src/proto/connection.rs b/src/proto/connection.rs index 1fec23102..619973df8 100644 --- a/src/proto/connection.rs +++ b/src/proto/connection.rs @@ -14,8 +14,6 @@ use std::task::{Context, Poll}; use std::time::Duration; use tokio::io::{AsyncRead, AsyncWrite}; -const DEFAULT_MAX_REMOTE_RESET_STREAMS: usize = 20; - /// An H2 connection #[derive(Debug)] pub(crate) struct Connection @@ -82,6 +80,7 @@ pub(crate) struct Config { pub max_send_buffer_size: usize, pub reset_stream_duration: Duration, pub reset_stream_max: usize, + pub remote_reset_stream_max: usize, pub settings: frame::Settings, } @@ -120,7 +119,7 @@ where .unwrap_or(false), local_reset_duration: config.reset_stream_duration, local_reset_max: config.reset_stream_max, - remote_reset_max: DEFAULT_MAX_REMOTE_RESET_STREAMS, + remote_reset_max: config.remote_reset_stream_max, remote_init_window_sz: DEFAULT_INITIAL_WINDOW_SIZE, remote_max_initiated: config .settings diff --git a/src/proto/mod.rs b/src/proto/mod.rs index 5ec7bf992..d71ee9c42 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -31,6 +31,7 @@ pub type WindowSize = u32; // Constants pub const MAX_WINDOW_SIZE: WindowSize = (1 << 31) - 1; +pub const DEFAULT_REMOTE_RESET_STREAM_MAX: usize = 20; pub const DEFAULT_RESET_STREAM_MAX: usize = 10; pub const DEFAULT_RESET_STREAM_SECS: u64 = 30; pub const DEFAULT_MAX_SEND_BUFFER_SIZE: usize = 1024 * 400; diff --git a/src/server.rs b/src/server.rs index 6f2455e0b..f1f4cf470 100644 --- a/src/server.rs +++ b/src/server.rs @@ -240,6 +240,10 @@ pub struct Builder { /// Maximum number of locally reset streams to keep at a time. reset_stream_max: usize, + /// Maximum number of remotely reset streams to allow in the pending + /// accept queue. + pending_accept_reset_stream_max: usize, + /// Initial `Settings` frame to send as part of the handshake. settings: Settings, @@ -642,6 +646,7 @@ impl Builder { Builder { reset_stream_duration: Duration::from_secs(proto::DEFAULT_RESET_STREAM_SECS), reset_stream_max: proto::DEFAULT_RESET_STREAM_MAX, + pending_accept_reset_stream_max: proto::DEFAULT_REMOTE_RESET_STREAM_MAX, settings: Settings::default(), initial_target_connection_window_size: None, max_send_buffer_size: proto::DEFAULT_MAX_SEND_BUFFER_SIZE, @@ -882,6 +887,49 @@ impl Builder { self } + /// Sets the maximum number of pending-accept remotely-reset streams. + /// + /// Streams that have been received by the peer, but not accepted by the + /// user, can also receive a RST_STREAM. This is a legitimate pattern: one + /// could send a request and then shortly after, realize it is not needed, + /// sending a CANCEL. + /// + /// However, since those streams are now "closed", they don't count towards + /// the max concurrent streams. So, they will sit in the accept queue, + /// using memory. + /// + /// When the number of remotely-reset streams sitting in the pending-accept + /// queue reaches this maximum value, a connection error with the code of + /// `ENHANCE_YOUR_CALM` will be sent to the peer, and returned by the + /// `Future`. + /// + /// The default value is currently 20, but could change. 
+ /// + /// # Examples + /// + /// + /// ``` + /// # use tokio::io::{AsyncRead, AsyncWrite}; + /// # use h2::server::*; + /// # + /// # fn doc(my_io: T) + /// # -> Handshake + /// # { + /// // `server_fut` is a future representing the completion of the HTTP/2 + /// // handshake. + /// let server_fut = Builder::new() + /// .max_pending_accept_reset_streams(100) + /// .handshake(my_io); + /// # server_fut + /// # } + /// # + /// # pub fn main() {} + /// ``` + pub fn max_pending_accept_reset_streams(&mut self, max: usize) -> &mut Self { + self.pending_accept_reset_stream_max = max; + self + } + /// Sets the maximum send buffer size per stream. /// /// Once a stream has buffered up to (or over) the maximum, the stream's @@ -1312,6 +1360,7 @@ where max_send_buffer_size: self.builder.max_send_buffer_size, reset_stream_duration: self.builder.reset_stream_duration, reset_stream_max: self.builder.reset_stream_max, + remote_reset_stream_max: self.builder.pending_accept_reset_stream_max, settings: self.builder.settings.clone(), }, ); diff --git a/tests/h2-tests/tests/stream_states.rs b/tests/h2-tests/tests/stream_states.rs index 610d3a530..5d86f7b47 100644 --- a/tests/h2-tests/tests/stream_states.rs +++ b/tests/h2-tests/tests/stream_states.rs @@ -200,6 +200,7 @@ async fn reset_streams_dont_grow_memory_continuously() { let (io, mut client) = mock::new(); const N: u32 = 50; + const MAX: usize = 20; let client = async move { let settings = client.assert_server_handshake().await; @@ -212,7 +213,7 @@ async fn reset_streams_dont_grow_memory_continuously() { } tokio::time::timeout( std::time::Duration::from_secs(1), - client.recv_frame(frames::go_away(41).calm()), + client.recv_frame(frames::go_away((MAX * 2 + 1) as u32).calm()), ) .await .expect("client goaway"); @@ -220,6 +221,7 @@ async fn reset_streams_dont_grow_memory_continuously() { let srv = async move { let mut srv = server::Builder::new() + .max_pending_accept_reset_streams(MAX) .handshake::<_, Bytes>(io) .await .expect("handshake"); From af4bcacf6d3770e9e3dc10fdc631fc8c0bdd472b Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 13 Apr 2023 10:22:58 -0400 Subject: [PATCH 17/56] v0.3.17 --- CHANGELOG.md | 9 +++++++++ Cargo.toml | 2 +- src/lib.rs | 2 +- 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 17abf81db..3c857790c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,12 @@ +# 0.3.17 (April 13, 2023) + +* Add `Error::is_library()` method to check if the originated inside `h2`. +* Add `max_pending_accept_reset_streams(usize)` option to client and server + builders. +* Fix theoretical memory growth when receiving too many HEADERS and then + RST_STREAM frames faster than an application can accept them off the queue. + (CVE-2023-26964) + # 0.3.16 (February 27, 2023) * Set `Protocol` extension on requests when received Extended CONNECT requests. diff --git a/Cargo.toml b/Cargo.toml index 64573e1f4..2dce90226 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.16" +version = "0.3.17" license = "MIT" authors = [ "Carl Lerche ", diff --git a/src/lib.rs b/src/lib.rs index 3af8b1a32..70321ab98 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! 
[`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.16")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.17")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] #![allow(clippy::type_complexity, clippy::manual_range_contains)] From 1c6fa285afe436ca2a1f8abd38a6389353f360b6 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Mon, 17 Apr 2023 14:08:02 -0400 Subject: [PATCH 18/56] fix: pending-accept remotely-reset streams pattern was checking is_local --- src/proto/streams/state.rs | 2 +- tests/h2-tests/tests/server.rs | 34 ++++++++++++++++++++++ tests/h2-tests/tests/stream_states.rs | 41 +++++++++++++++++++++++++++ 3 files changed, 76 insertions(+), 1 deletion(-) diff --git a/src/proto/streams/state.rs b/src/proto/streams/state.rs index b9612addc..76638fc87 100644 --- a/src/proto/streams/state.rs +++ b/src/proto/streams/state.rs @@ -362,7 +362,7 @@ impl State { pub fn is_remote_reset(&self) -> bool { match self.inner { - Closed(Cause::Error(ref e)) => e.is_local(), + Closed(Cause::Error(ref e)) => !e.is_local(), _ => false, } } diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index 8aea1fd59..c8c1c9d1c 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -879,6 +879,40 @@ async fn too_big_headers_sends_reset_after_431_if_not_eos() { join(client, srv).await; } +#[tokio::test] +async fn pending_accept_recv_illegal_content_length_data() { + h2_support::trace_init!(); + let (io, mut client) = mock::new(); + + let client = async move { + let settings = client.assert_server_handshake().await; + assert_default_settings!(settings); + client + .send_frame( + frames::headers(1) + .request("POST", "https://a.b") + .field("content-length", "1"), + ) + .await; + client + .send_frame(frames::data(1, &b"hello"[..]).eos()) + .await; + client.recv_frame(frames::reset(1).protocol_error()).await; + idle_ms(10).await; + }; + + let srv = async move { + let mut srv = server::Builder::new() + .handshake::<_, Bytes>(io) + .await + .expect("handshake"); + + let _req = srv.next().await.expect("req").expect("is_ok"); + }; + + join(client, srv).await; +} + #[tokio::test] async fn poll_reset() { h2_support::trace_init!(); diff --git a/tests/h2-tests/tests/stream_states.rs b/tests/h2-tests/tests/stream_states.rs index 5d86f7b47..138328efa 100644 --- a/tests/h2-tests/tests/stream_states.rs +++ b/tests/h2-tests/tests/stream_states.rs @@ -235,6 +235,47 @@ async fn reset_streams_dont_grow_memory_continuously() { join(srv, client).await; } +#[tokio::test] +async fn pending_accept_reset_streams_decrement_too() { + h2_support::trace_init!(); + let (io, mut client) = mock::new(); + + // If it didn't decrement internally, this would eventually get + // the count over MAX. 
+ const M: usize = 2; + const N: usize = 5; + const MAX: usize = 6; + + let client = async move { + let settings = client.assert_server_handshake().await; + assert_default_settings!(settings); + let mut id = 1; + for _ in 0..M { + for _ in 0..N { + client + .send_frame(frames::headers(id).request("GET", "https://a.b/").eos()) + .await; + client.send_frame(frames::reset(id).protocol_error()).await; + id += 2; + } + tokio::time::sleep(std::time::Duration::from_millis(50)).await; + } + }; + + let srv = async move { + let mut srv = server::Builder::new() + .max_pending_accept_reset_streams(MAX) + .handshake::<_, Bytes>(io) + .await + .expect("handshake"); + + while let Some(Ok(_)) = srv.accept().await {} + + poll_fn(|cx| srv.poll_closed(cx)).await.expect("server"); + }; + join(srv, client).await; +} + #[tokio::test] async fn errors_if_recv_frame_exceeds_max_frame_size() { h2_support::trace_init!(); From 1b9f0704ff24d5f7939d16162082c5a764a0bfaa Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Mon, 17 Apr 2023 14:40:41 -0400 Subject: [PATCH 19/56] v0.3.18 --- CHANGELOG.md | 4 ++++ Cargo.toml | 2 +- src/lib.rs | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3c857790c..31852daff 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.3.18 (April 17, 2023) + +* Fix panic because of opposite check in `is_remote_local()`. + # 0.3.17 (April 13, 2023) * Add `Error::is_library()` method to check if the originated inside `h2`. diff --git a/Cargo.toml b/Cargo.toml index 2dce90226..767961d0a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.17" +version = "0.3.18" license = "MIT" authors = [ "Carl Lerche ", diff --git a/src/lib.rs b/src/lib.rs index 70321ab98..420e0fee1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! 
[`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.17")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.18")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] #![allow(clippy::type_complexity, clippy::manual_range_contains)] From 072f7ee918d4d155e04320bd1785cd3f5fe6583d Mon Sep 17 00:00:00 2001 From: Rasmus Larsen Date: Fri, 14 Apr 2023 19:20:38 +0200 Subject: [PATCH 20/56] Serialize debug_data when present in GOAWAY frames --- src/frame/go_away.rs | 13 +++++++++++-- src/proto/connection.rs | 23 ++++++++++++++++++++++ src/proto/go_away.rs | 4 ---- src/server.rs | 6 ++++++ tests/h2-support/src/frames.rs | 7 +++++++ tests/h2-tests/tests/server.rs | 35 ++++++++++++++++++++++++++++++++++ 6 files changed, 82 insertions(+), 6 deletions(-) diff --git a/src/frame/go_away.rs b/src/frame/go_away.rs index 91d9c4c6b..4ab28d514 100644 --- a/src/frame/go_away.rs +++ b/src/frame/go_away.rs @@ -8,7 +8,6 @@ use crate::frame::{self, Error, Head, Kind, Reason, StreamId}; pub struct GoAway { last_stream_id: StreamId, error_code: Reason, - #[allow(unused)] debug_data: Bytes, } @@ -21,6 +20,15 @@ impl GoAway { } } + #[doc(hidden)] + #[cfg(feature = "unstable")] + pub fn with_debug_data(self, debug_data: impl Into) -> Self { + Self { + debug_data: debug_data.into(), + ..self + } + } + pub fn last_stream_id(&self) -> StreamId { self.last_stream_id } @@ -52,9 +60,10 @@ impl GoAway { pub fn encode(&self, dst: &mut B) { tracing::trace!("encoding GO_AWAY; code={:?}", self.error_code); let head = Head::new(Kind::GoAway, 0, StreamId::zero()); - head.encode(8, dst); + head.encode(8 + self.debug_data.len(), dst); dst.put_u32(self.last_stream_id.into()); dst.put_u32(self.error_code.into()); + dst.put(self.debug_data.slice(..)); } } diff --git a/src/proto/connection.rs b/src/proto/connection.rs index 619973df8..7ea124e44 100644 --- a/src/proto/connection.rs +++ b/src/proto/connection.rs @@ -398,6 +398,18 @@ where self.go_away.go_away_now(frame); } + #[doc(hidden)] + #[cfg(feature = "unstable")] + fn go_away_now_debug_data(&mut self) { + let last_processed_id = self.streams.last_processed_id(); + + let frame = frame::GoAway::new(last_processed_id, Reason::NO_ERROR) + .with_debug_data("something went wrong"); + + self.streams.send_go_away(last_processed_id); + self.go_away.go_away(frame); + } + fn go_away_from_user(&mut self, e: Reason) { let last_processed_id = self.streams.last_processed_id(); let frame = frame::GoAway::new(last_processed_id, e); @@ -576,6 +588,17 @@ where // for a pong before proceeding. self.inner.ping_pong.ping_shutdown(); } + + #[doc(hidden)] + #[cfg(feature = "unstable")] + pub fn go_away_debug_data(&mut self) { + if self.inner.go_away.is_going_away() { + return; + } + + self.inner.as_dyn().go_away_now_debug_data(); + self.inner.ping_pong.ping_shutdown(); + } } impl Drop for Connection diff --git a/src/proto/go_away.rs b/src/proto/go_away.rs index 759427878..d52252cd7 100644 --- a/src/proto/go_away.rs +++ b/src/proto/go_away.rs @@ -26,10 +26,6 @@ pub(super) struct GoAway { /// were a `frame::GoAway`, it might appear like we eventually wanted to /// serialize it. We **only** want to be able to look up these fields at a /// later time. -/// -/// (Technically, `frame::GoAway` should gain an opaque_debug_data field as -/// well, and we wouldn't want to save that here to accidentally dump in logs, -/// or waste struct space.) 
#[derive(Debug)] pub(crate) struct GoingAway { /// Stores the highest stream ID of a GOAWAY that has been sent. diff --git a/src/server.rs b/src/server.rs index f1f4cf470..032c0d17a 100644 --- a/src/server.rs +++ b/src/server.rs @@ -544,6 +544,12 @@ where self.connection.go_away_gracefully(); } + #[doc(hidden)] + #[cfg(feature = "unstable")] + pub fn debug_data_shutdown(&mut self) { + self.connection.go_away_debug_data(); + } + /// Takes a `PingPong` instance from the connection. /// /// # Note diff --git a/tests/h2-support/src/frames.rs b/tests/h2-support/src/frames.rs index bc4e2e708..4ee20dd77 100644 --- a/tests/h2-support/src/frames.rs +++ b/tests/h2-support/src/frames.rs @@ -305,6 +305,13 @@ impl Mock { self.reason(frame::Reason::NO_ERROR) } + pub fn data(self, debug_data: I) -> Self + where + I: Into, + { + Mock(self.0.with_debug_data(debug_data.into())) + } + pub fn reason(self, reason: frame::Reason) -> Self { Mock(frame::GoAway::new(self.0.last_stream_id(), reason)) } diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index c8c1c9d1c..78f4891a1 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -705,6 +705,41 @@ async fn graceful_shutdown() { join(client, srv).await; } +#[tokio::test] +async fn go_away_sends_debug_data() { + h2_support::trace_init!(); + + let (io, mut client) = mock::new(); + + let client = async move { + let settings = client.assert_server_handshake().await; + assert_default_settings!(settings); + client + .send_frame(frames::headers(1).request("POST", "https://example.com/")) + .await; + client + .recv_frame(frames::go_away(1).no_error().data("something went wrong")) + .await; + }; + + let src = async move { + let mut srv = server::handshake(io).await.expect("handshake"); + let (_req, _tx) = srv.next().await.unwrap().expect("server receives request"); + + srv.debug_data_shutdown(); + + let srv_fut = async move { + poll_fn(move |cx| srv.poll_closed(cx)) + .await + .expect("server"); + }; + + srv_fut.await + }; + + join(client, src).await; +} + #[tokio::test] async fn goaway_even_if_client_sent_goaway() { h2_support::trace_init!(); From b0e5470ae557845e5b1f5c304f59f21b1f47f5d8 Mon Sep 17 00:00:00 2001 From: Martijn Gribnau Date: Fri, 28 Apr 2023 01:44:00 +0200 Subject: [PATCH 21/56] Fix markdown code element in error::is_library --- src/error.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/error.rs b/src/error.rs index 1b1438e48..eb2b2acbc 100644 --- a/src/error.rs +++ b/src/error.rs @@ -104,7 +104,7 @@ impl Error { ) } - /// Returns true if the error was created by `h2. + /// Returns true if the error was created by `h2`. /// /// Such as noticing some protocol error and sending a GOAWAY or RST_STREAM. 
pub fn is_library(&self) -> bool { From 70eade5b4bf70c3c117ffc21b14bdfbd9a308567 Mon Sep 17 00:00:00 2001 From: Rasmus Larsen Date: Fri, 28 Apr 2023 16:25:56 +0200 Subject: [PATCH 22/56] Add too_many_resets debug_data to the GOAWAY we send (#678) Closes hyperium/hyper#3211 --- src/frame/go_away.rs | 9 +++---- src/proto/connection.rs | 27 ++++----------------- src/proto/error.rs | 4 +++ src/proto/streams/recv.rs | 5 +++- src/server.rs | 6 ----- tests/h2-support/src/frames.rs | 12 +++++++-- tests/h2-tests/tests/server.rs | 35 --------------------------- tests/h2-tests/tests/stream_states.rs | 7 +++++- 8 files changed, 33 insertions(+), 72 deletions(-) diff --git a/src/frame/go_away.rs b/src/frame/go_away.rs index 4ab28d514..99330e981 100644 --- a/src/frame/go_away.rs +++ b/src/frame/go_away.rs @@ -20,12 +20,11 @@ impl GoAway { } } - #[doc(hidden)] - #[cfg(feature = "unstable")] - pub fn with_debug_data(self, debug_data: impl Into) -> Self { + pub fn with_debug_data(last_stream_id: StreamId, reason: Reason, debug_data: Bytes) -> Self { Self { - debug_data: debug_data.into(), - ..self + last_stream_id, + error_code: reason, + debug_data, } } diff --git a/src/proto/connection.rs b/src/proto/connection.rs index 7ea124e44..727643a65 100644 --- a/src/proto/connection.rs +++ b/src/proto/connection.rs @@ -398,16 +398,10 @@ where self.go_away.go_away_now(frame); } - #[doc(hidden)] - #[cfg(feature = "unstable")] - fn go_away_now_debug_data(&mut self) { + fn go_away_now_data(&mut self, e: Reason, data: Bytes) { let last_processed_id = self.streams.last_processed_id(); - - let frame = frame::GoAway::new(last_processed_id, Reason::NO_ERROR) - .with_debug_data("something went wrong"); - - self.streams.send_go_away(last_processed_id); - self.go_away.go_away(frame); + let frame = frame::GoAway::with_debug_data(last_processed_id, e, data); + self.go_away.go_away_now(frame); } fn go_away_from_user(&mut self, e: Reason) { @@ -430,7 +424,7 @@ where // error. This is handled by setting a GOAWAY frame followed by // terminating the connection. Err(Error::GoAway(debug_data, reason, initiator)) => { - let e = Error::GoAway(debug_data, reason, initiator); + let e = Error::GoAway(debug_data.clone(), reason, initiator); tracing::debug!(error = ?e, "Connection::poll; connection error"); // We may have already sent a GOAWAY for this error, @@ -447,7 +441,7 @@ where // Reset all active streams self.streams.handle_error(e); - self.go_away_now(reason); + self.go_away_now_data(reason, debug_data); Ok(()) } // Attempting to read a frame resulted in a stream level error. @@ -588,17 +582,6 @@ where // for a pong before proceeding. 
self.inner.ping_pong.ping_shutdown(); } - - #[doc(hidden)] - #[cfg(feature = "unstable")] - pub fn go_away_debug_data(&mut self) { - if self.inner.go_away.is_going_away() { - return; - } - - self.inner.as_dyn().go_away_now_debug_data(); - self.inner.ping_pong.ping_shutdown(); - } } impl Drop for Connection diff --git a/src/proto/error.rs b/src/proto/error.rs index 2c00c7ea6..ad023317e 100644 --- a/src/proto/error.rs +++ b/src/proto/error.rs @@ -40,6 +40,10 @@ impl Error { Self::GoAway(Bytes::new(), reason, Initiator::Library) } + pub(crate) fn library_go_away_data(reason: Reason, debug_data: impl Into) -> Self { + Self::GoAway(debug_data.into(), reason, Initiator::Library) + } + pub(crate) fn remote_reset(stream_id: StreamId, reason: Reason) -> Self { Self::Reset(stream_id, reason, Initiator::Remote) } diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index 0fe2bdd57..cfc357082 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -763,7 +763,10 @@ impl Recv { "recv_reset; remotely-reset pending-accept streams reached limit ({:?})", counts.max_remote_reset_streams(), ); - return Err(Error::library_go_away(Reason::ENHANCE_YOUR_CALM)); + return Err(Error::library_go_away_data( + Reason::ENHANCE_YOUR_CALM, + "too_many_resets", + )); } } diff --git a/src/server.rs b/src/server.rs index 032c0d17a..f1f4cf470 100644 --- a/src/server.rs +++ b/src/server.rs @@ -544,12 +544,6 @@ where self.connection.go_away_gracefully(); } - #[doc(hidden)] - #[cfg(feature = "unstable")] - pub fn debug_data_shutdown(&mut self) { - self.connection.go_away_debug_data(); - } - /// Takes a `PingPong` instance from the connection. /// /// # Note diff --git a/tests/h2-support/src/frames.rs b/tests/h2-support/src/frames.rs index 4ee20dd77..d302d3ce5 100644 --- a/tests/h2-support/src/frames.rs +++ b/tests/h2-support/src/frames.rs @@ -309,11 +309,19 @@ impl Mock { where I: Into, { - Mock(self.0.with_debug_data(debug_data.into())) + Mock(frame::GoAway::with_debug_data( + self.0.last_stream_id(), + self.0.reason(), + debug_data.into(), + )) } pub fn reason(self, reason: frame::Reason) -> Self { - Mock(frame::GoAway::new(self.0.last_stream_id(), reason)) + Mock(frame::GoAway::with_debug_data( + self.0.last_stream_id(), + reason, + self.0.debug_data().clone(), + )) } } diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index 78f4891a1..c8c1c9d1c 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -705,41 +705,6 @@ async fn graceful_shutdown() { join(client, srv).await; } -#[tokio::test] -async fn go_away_sends_debug_data() { - h2_support::trace_init!(); - - let (io, mut client) = mock::new(); - - let client = async move { - let settings = client.assert_server_handshake().await; - assert_default_settings!(settings); - client - .send_frame(frames::headers(1).request("POST", "https://example.com/")) - .await; - client - .recv_frame(frames::go_away(1).no_error().data("something went wrong")) - .await; - }; - - let src = async move { - let mut srv = server::handshake(io).await.expect("handshake"); - let (_req, _tx) = srv.next().await.unwrap().expect("server receives request"); - - srv.debug_data_shutdown(); - - let srv_fut = async move { - poll_fn(move |cx| srv.poll_closed(cx)) - .await - .expect("server"); - }; - - srv_fut.await - }; - - join(client, src).await; -} - #[tokio::test] async fn goaway_even_if_client_sent_goaway() { h2_support::trace_init!(); diff --git a/tests/h2-tests/tests/stream_states.rs 
b/tests/h2-tests/tests/stream_states.rs index 138328efa..c28066d2c 100644 --- a/tests/h2-tests/tests/stream_states.rs +++ b/tests/h2-tests/tests/stream_states.rs @@ -211,9 +211,14 @@ async fn reset_streams_dont_grow_memory_continuously() { .await; client.send_frame(frames::reset(n).protocol_error()).await; } + tokio::time::timeout( std::time::Duration::from_secs(1), - client.recv_frame(frames::go_away((MAX * 2 + 1) as u32).calm()), + client.recv_frame( + frames::go_away((MAX * 2 + 1) as u32) + .data("too_many_resets") + .calm(), + ), ) .await .expect("client goaway"); From 7a77f93ca3a6fea028267a9066c8b976c49203b5 Mon Sep 17 00:00:00 2001 From: Anthony Ramine Date: Wed, 10 May 2023 09:58:02 +0200 Subject: [PATCH 23/56] Rename is_local_reset to is_local_error It also returns true for I/O errors and local GO_AWAYs. --- src/proto/streams/recv.rs | 4 ++-- src/proto/streams/state.rs | 2 +- src/proto/streams/streams.rs | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index cfc357082..8c7267a9d 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -537,7 +537,7 @@ impl Recv { let sz = sz as WindowSize; - let is_ignoring_frame = stream.state.is_local_reset(); + let is_ignoring_frame = stream.state.is_local_error(); if !is_ignoring_frame && !stream.state.is_recv_streaming() { // TODO: There are cases where this can be a stream error of @@ -853,7 +853,7 @@ impl Recv { /// Add a locally reset stream to queue to be eventually reaped. pub fn enqueue_reset_expiration(&mut self, stream: &mut store::Ptr, counts: &mut Counts) { - if !stream.state.is_local_reset() || stream.is_pending_reset_expiration() { + if !stream.state.is_local_error() || stream.is_pending_reset_expiration() { return; } diff --git a/src/proto/streams/state.rs b/src/proto/streams/state.rs index 76638fc87..72edbae77 100644 --- a/src/proto/streams/state.rs +++ b/src/proto/streams/state.rs @@ -352,7 +352,7 @@ impl State { matches!(self.inner, Closed(Cause::ScheduledLibraryReset(..))) } - pub fn is_local_reset(&self) -> bool { + pub fn is_local_error(&self) -> bool { match self.inner { Closed(Cause::Error(ref e)) => e.is_local(), Closed(Cause::ScheduledLibraryReset(..)) => true, diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index dbaebfa7a..dfc5c768b 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -448,7 +448,7 @@ impl Inner { let stream = self.store.resolve(key); - if stream.state.is_local_reset() { + if stream.state.is_local_error() { // Locally reset streams must ignore frames "for some time". // This is because the remote may have sent trailers before // receiving the RST_STREAM frame. From 3d558a6ed0b2cbaeb11805a7e6ecd53e38b508d6 Mon Sep 17 00:00:00 2001 From: Anthony Ramine Date: Wed, 10 May 2023 10:00:39 +0200 Subject: [PATCH 24/56] Ignore Error::GoAway in State::is_remote_reset When Streams::recv_go_away is called, Recv::handle_error is called on every stream whose stream id is past the GO_AWAY's last stream id, and those streams may have been pending-accepting. If the stream had not encountered an error before, Recv::handle_error then sets its state to State::Closed(Error::GoAway(_, _, Initiator::Remote)) which makes State::is_remote_reset return true in Streams::next_incoming, which leads to Counts::dec_remote_reset_streams being called even though Counts::inc_remote_reset_streams was never called for that stream, causing a panic about the counter being 0. 
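To make the fixed classification concrete, here is a small self-contained
sketch using simplified stand-ins for the internal types (these enums are
illustrative only, not h2's real `Error`/`Initiator` definitions; the actual
one-line change to `State::is_remote_reset` is in the diff below):

    // Simplified model: both a peer RST_STREAM and a peer GOAWAY close the
    // stream with a non-local error, but only the RST_STREAM path increments
    // the pending-accept reset counter.
    enum Initiator {
        Library,
        Remote,
    }

    enum Error {
        Reset(Initiator),  // stream closed by a RST_STREAM
        GoAway(Initiator), // stream closed because of a GOAWAY
    }

    impl Error {
        fn is_local(&self) -> bool {
            matches!(
                self,
                Error::Reset(Initiator::Library) | Error::GoAway(Initiator::Library)
            )
        }
    }

    // Old check: any non-local error counted as a remote reset, GOAWAY included.
    fn is_remote_reset_old(e: &Error) -> bool {
        !e.is_local()
    }

    // New check: only a reset actually sent by the peer counts.
    fn is_remote_reset_new(e: &Error) -> bool {
        matches!(e, Error::Reset(Initiator::Remote))
    }

    fn main() {
        // A real peer reset is counted both before and after the fix.
        let peer_reset = Error::Reset(Initiator::Remote);
        assert!(is_remote_reset_old(&peer_reset));
        assert!(is_remote_reset_new(&peer_reset));

        // A stream closed by a peer GOAWAY was wrongly counted before the fix,
        // so the decrement had no matching increment and the assertion fired.
        let closed_by_goaway = Error::GoAway(Initiator::Remote);
        assert!(is_remote_reset_old(&closed_by_goaway));
        assert!(!is_remote_reset_new(&closed_by_goaway));

        // A locally initiated reset is never a remote reset.
        let local_reset = Error::Reset(Initiator::Library);
        assert!(!is_remote_reset_old(&local_reset));
        assert!(!is_remote_reset_new(&local_reset));
    }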
--- src/proto/streams/state.rs | 2 +- tests/h2-tests/tests/stream_states.rs | 47 +++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 1 deletion(-) diff --git a/src/proto/streams/state.rs b/src/proto/streams/state.rs index 72edbae77..6f89b34c5 100644 --- a/src/proto/streams/state.rs +++ b/src/proto/streams/state.rs @@ -362,7 +362,7 @@ impl State { pub fn is_remote_reset(&self) -> bool { match self.inner { - Closed(Cause::Error(ref e)) => !e.is_local(), + Closed(Cause::Error(Error::Reset(_, _, Initiator::Remote))) => true, _ => false, } } diff --git a/tests/h2-tests/tests/stream_states.rs b/tests/h2-tests/tests/stream_states.rs index c28066d2c..423129630 100644 --- a/tests/h2-tests/tests/stream_states.rs +++ b/tests/h2-tests/tests/stream_states.rs @@ -240,6 +240,53 @@ async fn reset_streams_dont_grow_memory_continuously() { join(srv, client).await; } +#[tokio::test] +async fn go_away_with_pending_accepting() { + // h2_support::trace_init!(); + let (io, mut client) = mock::new(); + + let (sent_go_away_tx, sent_go_away_rx) = oneshot::channel(); + let (recv_go_away_tx, recv_go_away_rx) = oneshot::channel(); + + let client = async move { + let settings = client.assert_server_handshake().await; + assert_default_settings!(settings); + + client + .send_frame(frames::headers(1).request("GET", "https://baguette/").eos()) + .await; + + client + .send_frame(frames::headers(3).request("GET", "https://campagne/").eos()) + .await; + client.send_frame(frames::go_away(1).protocol_error()).await; + + sent_go_away_tx.send(()).unwrap(); + + recv_go_away_rx.await.unwrap(); + }; + + let srv = async move { + let mut srv = server::Builder::new() + .max_pending_accept_reset_streams(1) + .handshake::<_, Bytes>(io) + .await + .expect("handshake"); + + let (_req_1, _send_response_1) = srv.accept().await.unwrap().unwrap(); + + poll_fn(|cx| srv.poll_closed(cx)) + .drive(sent_go_away_rx) + .await + .unwrap(); + + let (_req_2, _send_response_2) = srv.accept().await.unwrap().unwrap(); + + recv_go_away_tx.send(()).unwrap(); + }; + join(srv, client).await; +} + #[tokio::test] async fn pending_accept_reset_streams_decrement_too() { h2_support::trace_init!(); From f126229cf436b3609236582d80a5c25cc944dd4b Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Fri, 12 May 2023 14:38:36 -0400 Subject: [PATCH 25/56] v0.3.19 --- CHANGELOG.md | 5 +++++ Cargo.toml | 2 +- src/lib.rs | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 31852daff..875cc70b2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +# 0.3.19 (May 12, 2023) + +* Fix counting reset streams when triggered by a GOAWAY. +* Send `too_many_resets` in opaque debug data of GOAWAY when too many resets received. + # 0.3.18 (April 17, 2023) * Fix panic because of opposite check in `is_remote_local()`. diff --git a/Cargo.toml b/Cargo.toml index 767961d0a..747ae7638 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.18" +version = "0.3.19" license = "MIT" authors = [ "Carl Lerche ", diff --git a/src/lib.rs b/src/lib.rs index 420e0fee1..830147113 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! 
[`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.18")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.19")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] #![allow(clippy::type_complexity, clippy::manual_range_contains)] From 04e6398bfe0cd9cb9590bc198c0921ac6441aea9 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Tue, 23 May 2023 16:57:04 -0400 Subject: [PATCH 26/56] fix: panicked when a reset stream would decrement twice --- src/proto/streams/recv.rs | 9 --------- tests/h2-tests/tests/stream_states.rs | 12 ++++++------ 2 files changed, 6 insertions(+), 15 deletions(-) diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index 8c7267a9d..ec4db1c79 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -859,15 +859,6 @@ impl Recv { tracing::trace!("enqueue_reset_expiration; {:?}", stream.id); - if !counts.can_inc_num_reset_streams() { - // try to evict 1 stream if possible - // if max allow is 0, this won't be able to evict, - // and then we'll just bail after - if let Some(evicted) = self.pending_reset_expired.pop(stream.store_mut()) { - counts.transition_after(evicted, true); - } - } - if counts.can_inc_num_reset_streams() { counts.inc_num_reset_streams(); self.pending_reset_expired.push(stream); diff --git a/tests/h2-tests/tests/stream_states.rs b/tests/h2-tests/tests/stream_states.rs index 423129630..16d113132 100644 --- a/tests/h2-tests/tests/stream_states.rs +++ b/tests/h2-tests/tests/stream_states.rs @@ -750,14 +750,14 @@ async fn rst_stream_max() { srv.recv_frame(frames::reset(1).cancel()).await; srv.recv_frame(frames::reset(3).cancel()).await; // sending frame after canceled! - // newer streams trump older streams - // 3 is still being ignored - srv.send_frame(frames::data(3, vec![0; 16]).eos()).await; + // olders streams trump newer streams + // 1 is still being ignored + srv.send_frame(frames::data(1, vec![0; 16]).eos()).await; // ping pong to be sure of no goaway srv.ping_pong([1; 8]).await; - // 1 has been evicted, will get a reset - srv.send_frame(frames::data(1, vec![0; 16]).eos()).await; - srv.recv_frame(frames::reset(1).stream_closed()).await; + // 3 has been evicted, will get a reset + srv.send_frame(frames::data(3, vec![0; 16]).eos()).await; + srv.recv_frame(frames::reset(3).stream_closed()).await; }; let client = async move { From 66c36c4edb04d8f75ca66b9199546308fe089c0d Mon Sep 17 00:00:00 2001 From: Michael Rodler Date: Tue, 23 May 2023 15:58:30 +0000 Subject: [PATCH 27/56] fix panic on receiving invalid headers frame by making the `take_request` function return a Result Signed-off-by: Michael Rodler Reviewed-by: Daniele Ahmed --- src/proto/streams/recv.rs | 11 ++++++----- src/proto/streams/streams.rs | 2 +- src/server.rs | 17 ++++++++++++----- tests/h2-tests/tests/server.rs | 32 ++++++++++++++++++++++++++++++++ 4 files changed, 51 insertions(+), 11 deletions(-) diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index ec4db1c79..cd96dce2c 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -251,14 +251,15 @@ impl Recv { } /// Called by the server to get the request - /// - /// TODO: Should this fn return `Result`? 
- pub fn take_request(&mut self, stream: &mut store::Ptr) -> Request<()> { + pub fn take_request(&mut self, stream: &mut store::Ptr) -> Result, proto::Error> { use super::peer::PollMessage::*; match stream.pending_recv.pop_front(&mut self.buffer) { - Some(Event::Headers(Server(request))) => request, - _ => panic!(), + Some(Event::Headers(Server(request))) => Ok(request), + _ => { + proto_err!(stream: "received invalid request; stream={:?}", stream.id); + Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR)) + } } } diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index dfc5c768b..d64e00970 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -1178,7 +1178,7 @@ impl StreamRef { /// # Panics /// /// This function panics if the request isn't present. - pub fn take_request(&self) -> Request<()> { + pub fn take_request(&self) -> Result, proto::Error> { let mut me = self.opaque.inner.lock().unwrap(); let me = &mut *me; diff --git a/src/server.rs b/src/server.rs index f1f4cf470..148cad517 100644 --- a/src/server.rs +++ b/src/server.rs @@ -425,13 +425,20 @@ where if let Some(inner) = self.connection.next_incoming() { tracing::trace!("received incoming"); - let (head, _) = inner.take_request().into_parts(); - let body = RecvStream::new(FlowControl::new(inner.clone_to_opaque())); + match inner.take_request() { + Ok(req) => { + let (head, _) = req.into_parts(); + let body = RecvStream::new(FlowControl::new(inner.clone_to_opaque())); - let request = Request::from_parts(head, body); - let respond = SendResponse { inner }; + let request = Request::from_parts(head, body); + let respond = SendResponse { inner }; - return Poll::Ready(Some(Ok((request, respond)))); + return Poll::Ready(Some(Ok((request, respond)))); + } + Err(e) => { + return Poll::Ready(Some(Err(e.into()))); + } + } } Poll::Pending diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index c8c1c9d1c..2637011ff 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -1378,3 +1378,35 @@ async fn reject_non_authority_target_on_connect_request() { join(client, srv).await; } + +#[tokio::test] +async fn reject_response_headers_in_request() { + h2_support::trace_init!(); + + let (io, mut client) = mock::new(); + + let client = async move { + let _ = client.assert_server_handshake().await; + + client.send_frame(frames::headers(1).response(128)).await; + + // TODO: is CANCEL the right error code to expect here? 
+ client.recv_frame(frames::reset(1).cancel()).await; + }; + + let srv = async move { + let builder = server::Builder::new(); + let mut srv = builder.handshake::<_, Bytes>(io).await.expect("handshake"); + + let res = srv.next().await; + tracing::warn!("{:?}", res); + assert!(res.is_some()); + assert!(res.unwrap().is_err()); + + poll_fn(move |cx| srv.poll_closed(cx)) + .await + .expect("server"); + }; + + join(client, srv).await; +} From 97bc3e36cf299e4e064653ced3352fc82a9cea70 Mon Sep 17 00:00:00 2001 From: Michael Rodler Date: Thu, 8 Jun 2023 19:19:55 +0200 Subject: [PATCH 28/56] hammer test requires a new tokio feature Signed-off-by: Michael Rodler Reviewed-by: Daniele Ahmed --- tests/h2-tests/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/h2-tests/Cargo.toml b/tests/h2-tests/Cargo.toml index 33436f3c4..6afdf9053 100644 --- a/tests/h2-tests/Cargo.toml +++ b/tests/h2-tests/Cargo.toml @@ -11,4 +11,4 @@ edition = "2018" h2-support = { path = "../h2-support" } tracing = "0.1.13" futures = { version = "0.3", default-features = false, features = ["alloc"] } -tokio = { version = "1", features = ["macros", "net", "rt", "io-util"] } +tokio = { version = "1", features = ["macros", "net", "rt", "io-util", "rt-multi-thread"] } From 972fb6f19ff195f9ea3920b40c862c60b898e791 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Mon, 12 Jun 2023 13:49:01 -0400 Subject: [PATCH 29/56] chore: add funding file --- .github/FUNDING.yml | 1 + 1 file changed, 1 insertion(+) create mode 100644 .github/FUNDING.yml diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 000000000..00642f837 --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1 @@ +github: seanmonstar From 864430c5dd453b70c29bb3d058e81876858380f4 Mon Sep 17 00:00:00 2001 From: Michael Rodler Date: Fri, 9 Jun 2023 17:07:18 +0000 Subject: [PATCH 30/56] Enabled clippy in CI and ran `clippy --fix` Signed-off-by: Michael Rodler --- .github/workflows/CI.yml | 7 +++++++ src/frame/data.rs | 2 +- src/hpack/table.rs | 6 +++--- src/hpack/test/fixture.rs | 2 +- src/lib.rs | 9 +++++++-- src/proto/streams/state.rs | 8 ++++---- 6 files changed, 23 insertions(+), 11 deletions(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 2cff15cff..e90c68af7 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -65,6 +65,13 @@ jobs: run: cargo clean; cargo update -Zminimal-versions; cargo check if: matrix.rust == 'nightly' + clippy_check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Run Clippy + run: cargo clippy --all-targets --all-features + msrv: name: Check MSRV needs: [style] diff --git a/src/frame/data.rs b/src/frame/data.rs index d0cdf5f69..5ed3c31b5 100644 --- a/src/frame/data.rs +++ b/src/frame/data.rs @@ -148,7 +148,7 @@ impl Data { /// /// Panics if `dst` cannot contain the data frame. 
pub(crate) fn encode_chunk(&mut self, dst: &mut U) { - let len = self.data.remaining() as usize; + let len = self.data.remaining(); assert!(dst.remaining_mut() >= len); diff --git a/src/hpack/table.rs b/src/hpack/table.rs index a1a780451..3e45f413b 100644 --- a/src/hpack/table.rs +++ b/src/hpack/table.rs @@ -319,7 +319,7 @@ impl Table { let mut probe = probe + 1; probe_loop!(probe < self.indices.len(), { - let pos = &mut self.indices[probe as usize]; + let pos = &mut self.indices[probe]; prev = match mem::replace(pos, Some(prev)) { Some(p) => p, @@ -656,12 +656,12 @@ fn to_raw_capacity(n: usize) -> usize { #[inline] fn desired_pos(mask: usize, hash: HashValue) -> usize { - (hash.0 & mask) as usize + hash.0 & mask } #[inline] fn probe_distance(mask: usize, hash: HashValue, current: usize) -> usize { - current.wrapping_sub(desired_pos(mask, hash)) & mask as usize + current.wrapping_sub(desired_pos(mask, hash)) & mask } fn hash_header(header: &Header) -> HashValue { diff --git a/src/hpack/test/fixture.rs b/src/hpack/test/fixture.rs index 0d33ca2de..d3f76e3bf 100644 --- a/src/hpack/test/fixture.rs +++ b/src/hpack/test/fixture.rs @@ -100,7 +100,7 @@ fn test_story(story: Value) { let mut input: Vec<_> = case .expect .iter() - .map(|&(ref name, ref value)| { + .map(|(name, value)| { Header::new(name.clone().into(), value.clone().into()) .unwrap() .into() diff --git a/src/lib.rs b/src/lib.rs index 830147113..7975ea8c2 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -79,9 +79,14 @@ //! [`client::handshake`]: client/fn.handshake.html #![doc(html_root_url = "https://docs.rs/h2/0.3.19")] -#![deny(missing_debug_implementations, missing_docs)] -#![cfg_attr(test, deny(warnings))] +#![deny( + missing_debug_implementations, + missing_docs, + clippy::missing_safety_doc, + clippy::undocumented_unsafe_blocks +)] #![allow(clippy::type_complexity, clippy::manual_range_contains)] +#![cfg_attr(test, deny(warnings))] macro_rules! proto_err { (conn: $($msg:tt)+) => { diff --git a/src/proto/streams/state.rs b/src/proto/streams/state.rs index 6f89b34c5..1ca8f5afb 100644 --- a/src/proto/streams/state.rs +++ b/src/proto/streams/state.rs @@ -361,10 +361,10 @@ impl State { } pub fn is_remote_reset(&self) -> bool { - match self.inner { - Closed(Cause::Error(Error::Reset(_, _, Initiator::Remote))) => true, - _ => false, - } + matches!( + self.inner, + Closed(Cause::Error(Error::Reset(_, _, Initiator::Remote))) + ) } /// Returns true if the stream is already reset. From 478f7b9889e9d8d53756558cca45cebd68aeaea0 Mon Sep 17 00:00:00 2001 From: Michael Rodler Date: Thu, 22 Jun 2023 18:09:52 +0200 Subject: [PATCH 31/56] Fix for invalid header panic corrected (#695) * Revert "fix panic on receiving invalid headers frame by making the `take_request` function return a Result" This reverts commit 66c36c4edb04d8f75ca66b9199546308fe089c0d. 
* proper fix for the panic in server receiving a request with a :status pseudo-header in the informational range of status codes --------- Signed-off-by: Michael Rodler Co-authored-by: Michael Rodler Co-authored-by: Daniele Ahmed --- src/proto/streams/recv.rs | 31 ++++++++++++++++++++----------- src/proto/streams/streams.rs | 2 +- src/server.rs | 17 +++++------------ tests/h2-tests/tests/server.rs | 19 ++++++++++--------- 4 files changed, 36 insertions(+), 33 deletions(-) diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index cd96dce2c..98de1bfa7 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -229,6 +229,11 @@ impl Recv { return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR).into()); } + if pseudo.status.is_some() && counts.peer().is_server() { + proto_err!(stream: "cannot use :status header for requests; stream={:?}", stream.id); + return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR).into()); + } + if !pseudo.is_informational() { let message = counts .peer() @@ -239,27 +244,31 @@ impl Recv { .pending_recv .push_back(&mut self.buffer, Event::Headers(message)); stream.notify_recv(); - } - // Only servers can receive a headers frame that initiates the stream. - // This is verified in `Streams` before calling this function. - if counts.peer().is_server() { - self.pending_accept.push(stream); + // Only servers can receive a headers frame that initiates the stream. + // This is verified in `Streams` before calling this function. + if counts.peer().is_server() { + // Correctness: never push a stream to `pending_accept` without having the + // corresponding headers frame pushed to `stream.pending_recv`. + self.pending_accept.push(stream); + } } Ok(()) } /// Called by the server to get the request - pub fn take_request(&mut self, stream: &mut store::Ptr) -> Result, proto::Error> { + /// + /// # Panics + /// + /// Panics if `stream.pending_recv` has no `Event::Headers` queued. + /// + pub fn take_request(&mut self, stream: &mut store::Ptr) -> Request<()> { use super::peer::PollMessage::*; match stream.pending_recv.pop_front(&mut self.buffer) { - Some(Event::Headers(Server(request))) => Ok(request), - _ => { - proto_err!(stream: "received invalid request; stream={:?}", stream.id); - Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR)) - } + Some(Event::Headers(Server(request))) => request, + _ => unreachable!("server stream queue must start with Headers"), } } diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index d64e00970..dfc5c768b 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -1178,7 +1178,7 @@ impl StreamRef { /// # Panics /// /// This function panics if the request isn't present. 
- pub fn take_request(&self) -> Result, proto::Error> { + pub fn take_request(&self) -> Request<()> { let mut me = self.opaque.inner.lock().unwrap(); let me = &mut *me; diff --git a/src/server.rs b/src/server.rs index 148cad517..f1f4cf470 100644 --- a/src/server.rs +++ b/src/server.rs @@ -425,20 +425,13 @@ where if let Some(inner) = self.connection.next_incoming() { tracing::trace!("received incoming"); - match inner.take_request() { - Ok(req) => { - let (head, _) = req.into_parts(); - let body = RecvStream::new(FlowControl::new(inner.clone_to_opaque())); + let (head, _) = inner.take_request().into_parts(); + let body = RecvStream::new(FlowControl::new(inner.clone_to_opaque())); - let request = Request::from_parts(head, body); - let respond = SendResponse { inner }; + let request = Request::from_parts(head, body); + let respond = SendResponse { inner }; - return Poll::Ready(Some(Ok((request, respond)))); - } - Err(e) => { - return Poll::Ready(Some(Err(e.into()))); - } - } + return Poll::Ready(Some(Ok((request, respond)))); } Poll::Pending diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index 2637011ff..0d7bb61cc 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -1380,7 +1380,7 @@ async fn reject_non_authority_target_on_connect_request() { } #[tokio::test] -async fn reject_response_headers_in_request() { +async fn reject_informational_status_header_in_request() { h2_support::trace_init!(); let (io, mut client) = mock::new(); @@ -1388,21 +1388,22 @@ async fn reject_response_headers_in_request() { let client = async move { let _ = client.assert_server_handshake().await; - client.send_frame(frames::headers(1).response(128)).await; + let status_code = 128; + assert!(StatusCode::from_u16(status_code) + .unwrap() + .is_informational()); - // TODO: is CANCEL the right error code to expect here? - client.recv_frame(frames::reset(1).cancel()).await; + client + .send_frame(frames::headers(1).response(status_code)) + .await; + + client.recv_frame(frames::reset(1).protocol_error()).await; }; let srv = async move { let builder = server::Builder::new(); let mut srv = builder.handshake::<_, Bytes>(io).await.expect("handshake"); - let res = srv.next().await; - tracing::warn!("{:?}", res); - assert!(res.is_some()); - assert!(res.unwrap().is_err()); - poll_fn(move |cx| srv.poll_closed(cx)) .await .expect("server"); From 0189722fd64d3cb5acd9764fdb85bb9a95232ea8 Mon Sep 17 00:00:00 2001 From: Michael Rodler Date: Mon, 26 Jun 2023 14:40:03 +0200 Subject: [PATCH 32/56] Fix for a fuzzer-discovered integer underflow of the flow control window size (#692) Removed the SubAssign, etc. syntactic sugar functions and switched to return Result on over/underflow Whenever possible, switched to returning a library GoAway protocol error. Otherwise we check for over/underflow only with `debug_assert!`, assuming that those code paths do not over/underflow. 
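As an illustration, here is a small self-contained sketch of the checked-arithmetic shape this patch moves to; Window and Reason below are simplified stand-ins for the crate's types, not its real definitions. Instead of `+=`/`-=` operators that can silently overflow the signed window, every update returns a Result that the connection can map to a FLOW_CONTROL_ERROR GOAWAY.

    #[derive(Debug, PartialEq)]
    struct Reason; // stands in for Reason::FLOW_CONTROL_ERROR (illustration only)

    #[derive(Debug)]
    struct Window(i32);

    impl Window {
        // HTTP/2 window values are capped at 2^31 - 1, so the u32 -> i32 cast
        // below is assumed to be in range.
        fn increase_by(&mut self, sz: u32) -> Result<(), Reason> {
            match self.0.checked_add(sz as i32) {
                Some(v) => {
                    self.0 = v;
                    Ok(())
                }
                None => Err(Reason),
            }
        }

        fn decrease_by(&mut self, sz: u32) -> Result<(), Reason> {
            match self.0.checked_sub(sz as i32) {
                Some(v) => {
                    self.0 = v;
                    Ok(())
                }
                None => Err(Reason),
            }
        }
    }

    fn main() {
        // A WINDOW_UPDATE that would push the window past i32::MAX is now a
        // recoverable error instead of an integer overflow.
        let mut w = Window(i32::MAX - 1);
        assert_eq!(w.increase_by(10), Err(Reason));

        // Windows may still legitimately go negative (for example after a
        // SETTINGS frame shrinks the initial window); only arithmetic
        // overflow of the underlying i32 is rejected.
        let mut w = Window(5);
        assert!(w.decrease_by(20).is_ok());
        assert_eq!(w.0, -15);
    }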
Signed-off-by: Michael Rodler Signed-off-by: Daniele Ahmed Co-authored-by: Michael Rodler Co-authored-by: Daniele Ahmed --- src/proto/connection.rs | 4 +- src/proto/mod.rs | 2 +- src/proto/streams/flow_control.rs | 73 +++++++------- src/proto/streams/prioritize.rs | 32 +++++-- src/proto/streams/recv.rs | 49 +++++++--- src/proto/streams/send.rs | 26 ++++- src/proto/streams/stream.rs | 12 ++- src/proto/streams/streams.rs | 2 +- tests/h2-tests/tests/flow_control.rs | 136 +++++++++++++++++++++++++++ 9 files changed, 271 insertions(+), 65 deletions(-) diff --git a/src/proto/connection.rs b/src/proto/connection.rs index 727643a65..637fac358 100644 --- a/src/proto/connection.rs +++ b/src/proto/connection.rs @@ -145,7 +145,9 @@ where /// connection flow control pub(crate) fn set_target_window_size(&mut self, size: WindowSize) { - self.inner.streams.set_target_connection_window_size(size); + let _res = self.inner.streams.set_target_connection_window_size(size); + // TODO: proper error handling + debug_assert!(_res.is_ok()); } /// Send a new SETTINGS frame with an updated initial window size. diff --git a/src/proto/mod.rs b/src/proto/mod.rs index d71ee9c42..567d03060 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -30,7 +30,7 @@ pub type PingPayload = [u8; 8]; pub type WindowSize = u32; // Constants -pub const MAX_WINDOW_SIZE: WindowSize = (1 << 31) - 1; +pub const MAX_WINDOW_SIZE: WindowSize = (1 << 31) - 1; // i32::MAX as u32 pub const DEFAULT_REMOTE_RESET_STREAM_MAX: usize = 20; pub const DEFAULT_RESET_STREAM_MAX: usize = 10; pub const DEFAULT_RESET_STREAM_SECS: u64 = 30; diff --git a/src/proto/streams/flow_control.rs b/src/proto/streams/flow_control.rs index 73a7754db..57a935825 100644 --- a/src/proto/streams/flow_control.rs +++ b/src/proto/streams/flow_control.rs @@ -75,12 +75,12 @@ impl FlowControl { self.window_size > self.available } - pub fn claim_capacity(&mut self, capacity: WindowSize) { - self.available -= capacity; + pub fn claim_capacity(&mut self, capacity: WindowSize) -> Result<(), Reason> { + self.available.decrease_by(capacity) } - pub fn assign_capacity(&mut self, capacity: WindowSize) { - self.available += capacity; + pub fn assign_capacity(&mut self, capacity: WindowSize) -> Result<(), Reason> { + self.available.increase_by(capacity) } /// If a WINDOW_UPDATE frame should be sent, returns a positive number @@ -136,22 +136,23 @@ impl FlowControl { /// /// This is called after receiving a SETTINGS frame with a lower /// INITIAL_WINDOW_SIZE value. - pub fn dec_send_window(&mut self, sz: WindowSize) { + pub fn dec_send_window(&mut self, sz: WindowSize) -> Result<(), Reason> { tracing::trace!( "dec_window; sz={}; window={}, available={}", sz, self.window_size, self.available ); - // This should not be able to overflow `window_size` from the bottom. - self.window_size -= sz; + // ~~This should not be able to overflow `window_size` from the bottom.~~ wrong. it can. + self.window_size.decrease_by(sz)?; + Ok(()) } /// Decrement the recv-side window size. /// /// This is called after receiving a SETTINGS ACK frame with a lower /// INITIAL_WINDOW_SIZE value. - pub fn dec_recv_window(&mut self, sz: WindowSize) { + pub fn dec_recv_window(&mut self, sz: WindowSize) -> Result<(), Reason> { tracing::trace!( "dec_recv_window; sz={}; window={}, available={}", sz, @@ -159,13 +160,14 @@ impl FlowControl { self.available ); // This should not be able to overflow `window_size` from the bottom. 
- self.window_size -= sz; - self.available -= sz; + self.window_size.decrease_by(sz)?; + self.available.decrease_by(sz)?; + Ok(()) } /// Decrements the window reflecting data has actually been sent. The caller /// must ensure that the window has capacity. - pub fn send_data(&mut self, sz: WindowSize) { + pub fn send_data(&mut self, sz: WindowSize) -> Result<(), Reason> { tracing::trace!( "send_data; sz={}; window={}; available={}", sz, @@ -176,12 +178,13 @@ impl FlowControl { // If send size is zero it's meaningless to update flow control window if sz > 0 { // Ensure that the argument is correct - assert!(self.window_size >= sz as usize); + assert!(self.window_size.0 >= sz as i32); // Update values - self.window_size -= sz; - self.available -= sz; + self.window_size.decrease_by(sz)?; + self.available.decrease_by(sz)?; } + Ok(()) } } @@ -208,6 +211,29 @@ impl Window { assert!(self.0 >= 0, "negative Window"); self.0 as WindowSize } + + pub fn decrease_by(&mut self, other: WindowSize) -> Result<(), Reason> { + if let Some(v) = self.0.checked_sub(other as i32) { + self.0 = v; + Ok(()) + } else { + Err(Reason::FLOW_CONTROL_ERROR) + } + } + + pub fn increase_by(&mut self, other: WindowSize) -> Result<(), Reason> { + let other = self.add(other)?; + self.0 = other.0; + Ok(()) + } + + pub fn add(&self, other: WindowSize) -> Result { + if let Some(v) = self.0.checked_add(other as i32) { + Ok(Self(v)) + } else { + Err(Reason::FLOW_CONTROL_ERROR) + } + } } impl PartialEq for Window { @@ -230,25 +256,6 @@ impl PartialOrd for Window { } } -impl ::std::ops::SubAssign for Window { - fn sub_assign(&mut self, other: WindowSize) { - self.0 -= other as i32; - } -} - -impl ::std::ops::Add for Window { - type Output = Self; - fn add(self, other: WindowSize) -> Self::Output { - Window(self.0 + other as i32) - } -} - -impl ::std::ops::AddAssign for Window { - fn add_assign(&mut self, other: WindowSize) { - self.0 += other as i32; - } -} - impl fmt::Display for Window { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(&self.0, f) diff --git a/src/proto/streams/prioritize.rs b/src/proto/streams/prioritize.rs index 88204ddcc..35795fae4 100644 --- a/src/proto/streams/prioritize.rs +++ b/src/proto/streams/prioritize.rs @@ -87,7 +87,9 @@ impl Prioritize { flow.inc_window(config.remote_init_window_sz) .expect("invalid initial window size"); - flow.assign_capacity(config.remote_init_window_sz); + // TODO: proper error handling + let _res = flow.assign_capacity(config.remote_init_window_sz); + debug_assert!(_res.is_ok()); tracing::trace!("Prioritize::new; flow={:?}", flow); @@ -253,7 +255,9 @@ impl Prioritize { if available as usize > capacity { let diff = available - capacity as WindowSize; - stream.send_flow.claim_capacity(diff); + // TODO: proper error handling + let _res = stream.send_flow.claim_capacity(diff); + debug_assert!(_res.is_ok()); self.assign_connection_capacity(diff, stream, counts); } @@ -324,7 +328,9 @@ impl Prioritize { pub fn reclaim_all_capacity(&mut self, stream: &mut store::Ptr, counts: &mut Counts) { let available = stream.send_flow.available().as_size(); if available > 0 { - stream.send_flow.claim_capacity(available); + // TODO: proper error handling + let _res = stream.send_flow.claim_capacity(available); + debug_assert!(_res.is_ok()); // Re-assign all capacity to the connection self.assign_connection_capacity(available, stream, counts); } @@ -337,7 +343,9 @@ impl Prioritize { if stream.requested_send_capacity as usize > stream.buffered_send_data { let reserved = 
stream.requested_send_capacity - stream.buffered_send_data as WindowSize; - stream.send_flow.claim_capacity(reserved); + // TODO: proper error handling + let _res = stream.send_flow.claim_capacity(reserved); + debug_assert!(_res.is_ok()); self.assign_connection_capacity(reserved, stream, counts); } } @@ -363,7 +371,9 @@ impl Prioritize { let span = tracing::trace_span!("assign_connection_capacity", inc); let _e = span.enter(); - self.flow.assign_capacity(inc); + // TODO: proper error handling + let _res = self.flow.assign_capacity(inc); + debug_assert!(_res.is_ok()); // Assign newly acquired capacity to streams pending capacity. while self.flow.available() > 0 { @@ -443,7 +453,9 @@ impl Prioritize { stream.assign_capacity(assign, self.max_buffer_size); // Claim the capacity from the connection - self.flow.claim_capacity(assign); + // TODO: proper error handling + let _res = self.flow.claim_capacity(assign); + debug_assert!(_res.is_ok()); } tracing::trace!( @@ -763,12 +775,16 @@ impl Prioritize { // Assign the capacity back to the connection that // was just consumed from the stream in the previous // line. - self.flow.assign_capacity(len); + // TODO: proper error handling + let _res = self.flow.assign_capacity(len); + debug_assert!(_res.is_ok()); }); let (eos, len) = tracing::trace_span!("updating connection flow") .in_scope(|| { - self.flow.send_data(len); + // TODO: proper error handling + let _res = self.flow.send_data(len); + debug_assert!(_res.is_ok()); // Wrap the frame's data payload to ensure that the // correct amount of data gets written. diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index 98de1bfa7..d0db00dd8 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -90,7 +90,7 @@ impl Recv { // settings flow.inc_window(DEFAULT_INITIAL_WINDOW_SIZE) .expect("invalid initial remote window size"); - flow.assign_capacity(DEFAULT_INITIAL_WINDOW_SIZE); + flow.assign_capacity(DEFAULT_INITIAL_WINDOW_SIZE).unwrap(); Recv { init_window_sz: config.local_init_window_sz, @@ -363,7 +363,9 @@ impl Recv { self.in_flight_data -= capacity; // Assign capacity to connection - self.flow.assign_capacity(capacity); + // TODO: proper error handling + let _res = self.flow.assign_capacity(capacity); + debug_assert!(_res.is_ok()); if self.flow.unclaimed_capacity().is_some() { if let Some(task) = task.take() { @@ -391,7 +393,9 @@ impl Recv { stream.in_flight_recv_data -= capacity; // Assign capacity to stream - stream.recv_flow.assign_capacity(capacity); + // TODO: proper error handling + let _res = stream.recv_flow.assign_capacity(capacity); + debug_assert!(_res.is_ok()); if stream.recv_flow.unclaimed_capacity().is_some() { // Queue the stream for sending the WINDOW_UPDATE frame. @@ -437,7 +441,11 @@ impl Recv { /// /// The `task` is an optional parked task for the `Connection` that might /// be blocked on needing more window capacity. - pub fn set_target_connection_window(&mut self, target: WindowSize, task: &mut Option) { + pub fn set_target_connection_window( + &mut self, + target: WindowSize, + task: &mut Option, + ) -> Result<(), Reason> { tracing::trace!( "set_target_connection_window; target={}; available={}, reserved={}", target, @@ -450,11 +458,15 @@ impl Recv { // // Update the flow controller with the difference between the new // target and the current target. - let current = (self.flow.available() + self.in_flight_data).checked_size(); + let current = self + .flow + .available() + .add(self.in_flight_data)? 
+ .checked_size(); if target > current { - self.flow.assign_capacity(target - current); + self.flow.assign_capacity(target - current)?; } else { - self.flow.claim_capacity(current - target); + self.flow.claim_capacity(current - target)?; } // If changing the target capacity means we gained a bunch of capacity, @@ -465,6 +477,7 @@ impl Recv { task.wake(); } } + Ok(()) } pub(crate) fn apply_local_settings( @@ -504,9 +517,13 @@ impl Recv { let dec = old_sz - target; tracing::trace!("decrementing all windows; dec={}", dec); - store.for_each(|mut stream| { - stream.recv_flow.dec_recv_window(dec); - }) + store.try_for_each(|mut stream| { + stream + .recv_flow + .dec_recv_window(dec) + .map_err(proto::Error::library_go_away)?; + Ok::<_, proto::Error>(()) + })?; } Ordering::Greater => { // We must increase the (local) window on every open stream. @@ -519,7 +536,10 @@ impl Recv { .recv_flow .inc_window(inc) .map_err(proto::Error::library_go_away)?; - stream.recv_flow.assign_capacity(inc); + stream + .recv_flow + .assign_capacity(inc) + .map_err(proto::Error::library_go_away)?; Ok::<_, proto::Error>(()) })?; } @@ -626,7 +646,10 @@ impl Recv { } // Update stream level flow control - stream.recv_flow.send_data(sz); + stream + .recv_flow + .send_data(sz) + .map_err(proto::Error::library_go_away)?; // Track the data as in-flight stream.in_flight_recv_data += sz; @@ -667,7 +690,7 @@ impl Recv { } // Update connection level flow control - self.flow.send_data(sz); + self.flow.send_data(sz).map_err(Error::library_go_away)?; // Track the data as in-flight self.in_flight_data += sz; diff --git a/src/proto/streams/send.rs b/src/proto/streams/send.rs index 20aba38d4..dcb5225c7 100644 --- a/src/proto/streams/send.rs +++ b/src/proto/streams/send.rs @@ -4,7 +4,7 @@ use super::{ }; use crate::codec::UserError; use crate::frame::{self, Reason}; -use crate::proto::{Error, Initiator}; +use crate::proto::{self, Error, Initiator}; use bytes::Buf; use tokio::io::AsyncWrite; @@ -458,10 +458,21 @@ impl Send { tracing::trace!("decrementing all windows; dec={}", dec); let mut total_reclaimed = 0; - store.for_each(|mut stream| { + store.try_for_each(|mut stream| { let stream = &mut *stream; - stream.send_flow.dec_send_window(dec); + tracing::trace!( + "decrementing stream window; id={:?}; decr={}; flow={:?}", + stream.id, + dec, + stream.send_flow + ); + + // TODO: this decrement can underflow based on received frames! + stream + .send_flow + .dec_send_window(dec) + .map_err(proto::Error::library_go_away)?; // It's possible that decreasing the window causes // `window_size` (the stream-specific window) to fall below @@ -474,7 +485,10 @@ impl Send { let reclaimed = if available > window_size { // Drop down to `window_size`. let reclaim = available - window_size; - stream.send_flow.claim_capacity(reclaim); + stream + .send_flow + .claim_capacity(reclaim) + .map_err(proto::Error::library_go_away)?; total_reclaimed += reclaim; reclaim } else { @@ -492,7 +506,9 @@ impl Send { // TODO: Should this notify the producer when the capacity // of a stream is reduced? Maybe it should if the capacity // is reduced to zero, allowing the producer to stop work. 
- }); + + Ok::<_, proto::Error>(()) + })?; self.prioritize .assign_connection_capacity(total_reclaimed, store, counts); diff --git a/src/proto/streams/stream.rs b/src/proto/streams/stream.rs index 2888d744b..43e313647 100644 --- a/src/proto/streams/stream.rs +++ b/src/proto/streams/stream.rs @@ -146,7 +146,9 @@ impl Stream { recv_flow .inc_window(init_recv_window) .expect("invalid initial receive window"); - recv_flow.assign_capacity(init_recv_window); + // TODO: proper error handling? + let _res = recv_flow.assign_capacity(init_recv_window); + debug_assert!(_res.is_ok()); send_flow .inc_window(init_send_window) @@ -275,7 +277,9 @@ impl Stream { pub fn assign_capacity(&mut self, capacity: WindowSize, max_buffer_size: usize) { let prev_capacity = self.capacity(max_buffer_size); debug_assert!(capacity > 0); - self.send_flow.assign_capacity(capacity); + // TODO: proper error handling + let _res = self.send_flow.assign_capacity(capacity); + debug_assert!(_res.is_ok()); tracing::trace!( " assigned capacity to stream; available={}; buffered={}; id={:?}; max_buffer_size={} prev={}", @@ -294,7 +298,9 @@ impl Stream { pub fn send_data(&mut self, len: WindowSize, max_buffer_size: usize) { let prev_capacity = self.capacity(max_buffer_size); - self.send_flow.send_data(len); + // TODO: proper error handling + let _res = self.send_flow.send_data(len); + debug_assert!(_res.is_ok()); // Decrement the stream's buffered data counter debug_assert!(self.buffered_send_data >= len as usize); diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index dfc5c768b..eab362f73 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -118,7 +118,7 @@ where } } - pub fn set_target_connection_window_size(&mut self, size: WindowSize) { + pub fn set_target_connection_window_size(&mut self, size: WindowSize) -> Result<(), Reason> { let mut me = self.inner.lock().unwrap(); let me = &mut *me; diff --git a/tests/h2-tests/tests/flow_control.rs b/tests/h2-tests/tests/flow_control.rs index 5caa2ec3a..dbb933286 100644 --- a/tests/h2-tests/tests/flow_control.rs +++ b/tests/h2-tests/tests/flow_control.rs @@ -1858,3 +1858,139 @@ async fn poll_capacity_wakeup_after_window_update() { join(srv, h2).await; } + +#[tokio::test] +async fn window_size_decremented_past_zero() { + h2_support::trace_init!(); + let (io, mut client) = mock::new(); + + let client = async move { + // let _ = client.assert_server_handshake().await; + + // preface + client.write_preface().await; + + // the following http 2 bytes are fuzzer-generated + client.send_bytes(&[0, 0, 0, 4, 0, 0, 0, 0, 0]).await; + client + .send_bytes(&[ + 0, 0, 23, 1, 1, 0, 249, 255, 191, 131, 1, 1, 1, 70, 1, 1, 1, 1, 65, 1, 1, 65, 1, 1, + 65, 1, 1, 1, 1, 1, 1, 190, + ]) + .await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client + .send_bytes(&[ + 0, 0, 9, 247, 0, 121, 255, 255, 184, 1, 65, 1, 1, 1, 1, 1, 1, 190, + ]) + .await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client + .send_bytes(&[0, 0, 3, 0, 1, 0, 249, 255, 191, 1, 1, 190]) + .await; + client + .send_bytes(&[0, 0, 2, 50, 107, 0, 0, 0, 1, 0, 0]) + .await; + 
client + .send_bytes(&[0, 0, 5, 2, 0, 0, 0, 0, 1, 128, 0, 55, 0, 0]) + .await; + client + .send_bytes(&[ + 0, 0, 12, 4, 0, 0, 0, 0, 0, 126, 4, 39, 184, 171, 125, 33, 0, 3, 107, 50, 98, + ]) + .await; + client + .send_bytes(&[0, 0, 6, 4, 0, 0, 0, 0, 0, 3, 4, 76, 255, 71, 131]) + .await; + client + .send_bytes(&[ + 0, 0, 12, 4, 0, 0, 0, 0, 0, 0, 4, 39, 184, 171, 74, 33, 0, 3, 107, 50, 98, + ]) + .await; + client + .send_bytes(&[ + 0, 0, 30, 4, 0, 0, 0, 0, 0, 0, 4, 56, 184, 171, 125, 65, 0, 35, 65, 65, 65, 61, + 232, 87, 115, 89, 116, 0, 4, 0, 58, 33, 125, 33, 79, 3, 107, 49, 98, + ]) + .await; + client + .send_bytes(&[ + 0, 0, 12, 4, 0, 0, 0, 0, 0, 0, 4, 39, 184, 171, 125, 33, 0, 3, 107, 50, 98, + ]) + .await; + client.send_bytes(&[0, 0, 0, 4, 0, 0, 0, 0, 0]).await; + client + .send_bytes(&[ + 0, 0, 12, 4, 0, 0, 0, 0, 0, 126, 4, 39, 184, 171, 125, 33, 0, 3, 107, 50, 98, + ]) + .await; + client + .send_bytes(&[ + 0, 0, 177, 1, 44, 0, 0, 0, 1, 67, 67, 67, 67, 67, 67, 131, 134, 5, 61, 67, 67, 67, + 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, + 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, + 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 115, 102, 1, 3, 48, 43, + 101, 64, 31, 37, 99, 99, 97, 97, 97, 97, 49, 97, 54, 97, 97, 97, 97, 49, 97, 54, + 97, 99, 54, 53, 53, 51, 53, 99, 99, 97, 97, 99, 97, 97, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ]) + .await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client + .send_bytes(&[ + 0, 0, 12, 4, 0, 0, 0, 0, 0, 0, 4, 0, 58, 171, 125, 33, 79, 3, 107, 49, 98, + ]) + .await; + client + .send_bytes(&[0, 0, 6, 4, 0, 0, 0, 0, 0, 0, 4, 87, 115, 89, 116]) + .await; + client + .send_bytes(&[ + 0, 0, 12, 4, 0, 0, 0, 0, 0, 126, 4, 39, 184, 171, 125, 33, 0, 3, 107, 50, 98, + ]) + .await; + client + .send_bytes(&[ + 0, 0, 129, 1, 44, 0, 0, 0, 1, 67, 67, 67, 67, 67, 67, 131, 134, 5, 18, 67, 67, 61, + 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 48, 54, 53, 55, 114, 1, 4, 97, 49, 51, 116, + 64, 2, 117, 115, 4, 103, 101, 110, 116, 64, 8, 57, 111, 110, 116, 101, 110, 115, + 102, 7, 43, 43, 49, 48, 48, 43, 101, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]) + .await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client + .send_bytes(&[ + 0, 0, 12, 4, 0, 0, 0, 0, 0, 0, 4, 0, 58, 171, 125, 33, 79, 3, 107, 49, 98, + ]) + .await; + + // TODO: is CANCEL the right error code to expect here? 
+ // client.recv_frame(frames::reset(1).protocol_error()).await; + }; + + let srv = async move { + let builder = server::Builder::new(); + let mut srv = builder.handshake::<_, Bytes>(io).await.expect("handshake"); + + // just keep it open + let res = poll_fn(move |cx| srv.poll_closed(cx)).await; + tracing::debug!("{:?}", res); + }; + + join(client, srv).await; +} From 6a75f232330374d5f329aaae91afc2dee7ed2b1f Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Mon, 26 Jun 2023 08:43:36 -0400 Subject: [PATCH 33/56] v0.3.20 --- CHANGELOG.md | 6 ++++++ Cargo.toml | 2 +- src/lib.rs | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 875cc70b2..9c035533c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +# 0.3.20 (June 26, 2023) + +* Fix panic if a server received a request with a `:status` pseudo header in the 1xx range. +* Fix panic if a reset stream had pending push promises that were more than allowed. +* Fix potential flow control overflow by subtraction, instead returning a connection error. + # 0.3.19 (May 12, 2023) * Fix counting reset streams when triggered by a GOAWAY. diff --git a/Cargo.toml b/Cargo.toml index 747ae7638..1ef688515 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.19" +version = "0.3.20" license = "MIT" authors = [ "Carl Lerche ", diff --git a/src/lib.rs b/src/lib.rs index 7975ea8c2..1c5f57625 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! [`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.19")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.20")] #![deny( missing_debug_implementations, missing_docs, From 46fb80bfd07b4dc6b8ee87b9c2e6415399830910 Mon Sep 17 00:00:00 2001 From: Artem Medvedev Date: Sat, 22 Jul 2023 12:06:56 +0200 Subject: [PATCH 34/56] test: early server response with data (#703) - fix of the test's name after changing it in #634 - additional test that server also sends data frames correctly --- tests/h2-tests/tests/server.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index 0d7bb61cc..33e08c19d 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -553,7 +553,7 @@ async fn recv_connection_header() { } #[tokio::test] -async fn sends_reset_cancel_when_req_body_is_dropped() { +async fn sends_reset_no_error_when_req_body_is_dropped() { h2_support::trace_init!(); let (io, mut client) = mock::new(); @@ -563,8 +563,11 @@ async fn sends_reset_cancel_when_req_body_is_dropped() { client .send_frame(frames::headers(1).request("POST", "https://example.com/")) .await; + // server responded with data before consuming POST-request's body, resulting in `RST_STREAM(NO_ERROR)`. 
+ client.recv_frame(frames::headers(1).response(200)).await; + client.recv_frame(frames::data(1, vec![0; 16384])).await; client - .recv_frame(frames::headers(1).response(200).eos()) + .recv_frame(frames::data(1, vec![0; 16384]).eos()) .await; client .recv_frame(frames::reset(1).reason(Reason::NO_ERROR)) @@ -578,7 +581,8 @@ async fn sends_reset_cancel_when_req_body_is_dropped() { assert_eq!(req.method(), &http::Method::POST); let rsp = http::Response::builder().status(200).body(()).unwrap(); - stream.send_response(rsp, true).unwrap(); + let mut tx = stream.send_response(rsp, false).unwrap(); + tx.send_data(vec![0; 16384 * 2].into(), true).unwrap(); } assert!(srv.next().await.is_none()); }; From 633116ef68b4e7b5c4c5699fb5d10b58ef5818ac Mon Sep 17 00:00:00 2001 From: Artem Medvedev Date: Mon, 24 Jul 2023 18:59:27 +0200 Subject: [PATCH 35/56] fix: do not ignore result of `ensure_recv_open` (#687) --- src/proto/streams/recv.rs | 8 +++++++- src/proto/streams/streams.rs | 6 +++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index d0db00dd8..0063942a4 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -318,7 +318,13 @@ impl Recv { Some(Event::Headers(Client(response))) => Poll::Ready(Ok(response)), Some(_) => panic!("poll_response called after response returned"), None => { - stream.state.ensure_recv_open()?; + if !stream.state.ensure_recv_open()? { + proto_err!(stream: "poll_response: stream={:?} is not opened;", stream.id); + return Poll::Ready(Err(Error::library_reset( + stream.id, + Reason::PROTOCOL_ERROR, + ))); + } stream.recv_task = Some(cx.waker().clone()); Poll::Pending diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index eab362f73..02a0f61b6 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -726,7 +726,11 @@ impl Inner { } // The stream must be receive open - stream.state.ensure_recv_open()?; + if !stream.state.ensure_recv_open()? { + proto_err!(conn: "recv_push_promise: initiating stream is not opened"); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); + } + stream.key() } None => { From cdcc641902a2c5b058da62980c5b1bede16671a4 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Mon, 21 Aug 2023 10:36:52 -0400 Subject: [PATCH 36/56] msrv: bump to 1.63 (#708) * ci: only check h2 package for msrv * msrv: bump to 1.63, tokio requires it --- .github/workflows/CI.yml | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index e90c68af7..1467d9fc7 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -93,4 +93,4 @@ jobs: with: toolchain: ${{ steps.metadata.outputs.msrv }} - - run: cargo check + - run: cargo check -p h2 diff --git a/Cargo.toml b/Cargo.toml index 1ef688515..f97b16bef 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ keywords = ["http", "async", "non-blocking"] categories = ["asynchronous", "web-programming", "network-programming"] exclude = ["fixtures/**", "ci/**"] edition = "2018" -rust-version = "1.56" +rust-version = "1.63" [features] # Enables `futures::Stream` implementations for various types. 
From da9f34bc808a38be2c36f0f8e3a78c0164548419 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Mon, 21 Aug 2023 11:08:46 -0400 Subject: [PATCH 37/56] chore: fix up some clippy nags (#709) --- src/hpack/decoder.rs | 6 +++--- src/proto/streams/state.rs | 9 ++------- 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/src/hpack/decoder.rs b/src/hpack/decoder.rs index b45c37927..960cbb143 100644 --- a/src/hpack/decoder.rs +++ b/src/hpack/decoder.rs @@ -447,7 +447,7 @@ fn decode_int(buf: &mut B, prefix_size: u8) -> Result(buf: &mut B) -> Option { +fn peek_u8(buf: &B) -> Option { if buf.has_remaining() { Some(buf.chunk()[0]) } else { @@ -835,9 +835,9 @@ mod test { fn test_peek_u8() { let b = 0xff; let mut buf = Cursor::new(vec![b]); - assert_eq!(peek_u8(&mut buf), Some(b)); + assert_eq!(peek_u8(&buf), Some(b)); assert_eq!(buf.get_u8(), b); - assert_eq!(peek_u8(&mut buf), None); + assert_eq!(peek_u8(&buf), None); } #[test] diff --git a/src/proto/streams/state.rs b/src/proto/streams/state.rs index 1ca8f5afb..5256f09cf 100644 --- a/src/proto/streams/state.rs +++ b/src/proto/streams/state.rs @@ -64,8 +64,9 @@ enum Inner { Closed(Cause), } -#[derive(Debug, Copy, Clone)] +#[derive(Debug, Copy, Clone, Default)] enum Peer { + #[default] AwaitingHeaders, Streaming, } @@ -466,9 +467,3 @@ impl Default for State { State { inner: Inner::Idle } } } - -impl Default for Peer { - fn default() -> Self { - AwaitingHeaders - } -} From 9bd62a2efc19da072ff6b55c92586b02a714194e Mon Sep 17 00:00:00 2001 From: Anthony Ramine <123095+nox@users.noreply.github.com> Date: Mon, 21 Aug 2023 19:09:26 +0200 Subject: [PATCH 38/56] Test that client reacts correctly on rogue HEADERS (#667) --- tests/h2-tests/tests/client_request.rs | 85 ++++++++++++++++++++++++++ 1 file changed, 85 insertions(+) diff --git a/tests/h2-tests/tests/client_request.rs b/tests/h2-tests/tests/client_request.rs index aff39f5c1..258826d1d 100644 --- a/tests/h2-tests/tests/client_request.rs +++ b/tests/h2-tests/tests/client_request.rs @@ -1454,6 +1454,91 @@ async fn extended_connect_request() { join(srv, h2).await; } +#[tokio::test] +async fn rogue_server_odd_headers() { + h2_support::trace_init!(); + let (io, mut srv) = mock::new(); + + let srv = async move { + let settings = srv.assert_client_handshake().await; + assert_default_settings!(settings); + srv.send_frame(frames::headers(1)).await; + srv.recv_frame(frames::go_away(0).protocol_error()).await; + }; + + let h2 = async move { + let (_client, h2) = client::handshake(io).await.unwrap(); + + let err = h2.await.unwrap_err(); + assert!(err.is_go_away()); + assert_eq!(err.reason(), Some(Reason::PROTOCOL_ERROR)); + }; + + join(srv, h2).await; +} + +#[tokio::test] +async fn rogue_server_even_headers() { + h2_support::trace_init!(); + let (io, mut srv) = mock::new(); + + let srv = async move { + let settings = srv.assert_client_handshake().await; + assert_default_settings!(settings); + srv.send_frame(frames::headers(2)).await; + srv.recv_frame(frames::go_away(0).protocol_error()).await; + }; + + let h2 = async move { + let (_client, h2) = client::handshake(io).await.unwrap(); + + let err = h2.await.unwrap_err(); + assert!(err.is_go_away()); + assert_eq!(err.reason(), Some(Reason::PROTOCOL_ERROR)); + }; + + join(srv, h2).await; +} + +#[tokio::test] +async fn rogue_server_reused_headers() { + h2_support::trace_init!(); + let (io, mut srv) = mock::new(); + + let srv = async move { + let settings = srv.assert_client_handshake().await; + assert_default_settings!(settings); + + 
srv.recv_frame( + frames::headers(1) + .request("GET", "https://camembert.fromage") + .eos(), + ) + .await; + srv.send_frame(frames::headers(1).response(200).eos()).await; + srv.send_frame(frames::headers(1)).await; + srv.recv_frame(frames::reset(1).stream_closed()).await; + }; + + let h2 = async move { + let (mut client, mut h2) = client::handshake(io).await.unwrap(); + + h2.drive(async { + let request = Request::builder() + .method(Method::GET) + .uri("https://camembert.fromage") + .body(()) + .unwrap(); + let _res = client.send_request(request, true).unwrap().0.await.unwrap(); + }) + .await; + + h2.await.unwrap(); + }; + + join(srv, h2).await; +} + const SETTINGS: &[u8] = &[0, 0, 0, 4, 0, 0, 0, 0, 0]; const SETTINGS_ACK: &[u8] = &[0, 0, 0, 4, 1, 0, 0, 0, 0]; From ee042922780651349915afb9cfe9b86d6b1084f9 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Mon, 21 Aug 2023 13:20:06 -0400 Subject: [PATCH 39/56] Fix opening new streams over max concurrent (#707) There was a bug where opening streams over the max concurrent streams was possible if max_concurrent_streams were lowered beyond the current number of open streams and there were already new streams adding to the pending_send queue. There was two mechanisms for streams to end up in that queue. 1. send_headers would push directly onto pending_send when below max_concurrent_streams 2. prioritize would pop from pending_open until max_concurrent_streams was reached. For case 1, a settings frame could be received after pushing many streams onto pending_send and before the socket was ready to write again. For case 2, the pending_send queue could have Headers frames queued going into a Not Ready state with the socket, a settings frame could be received, and then the headers would be written anyway after the ack. The fix is therefore also two fold. Fixing case 1 is as simple as letting Prioritize decide when to transition streams from `pending_open` to `pending_send` since only it knows the readiness of the socket and whether the headers can be written immediately. This is slightly complicated by the fact that previously SendRequest would block when streams would be added as "pending open". That was addressed by guessing when to block based on max concurrent streams rather than the stream state. The fix for Prioritize was to conservatively pop streams from pending_open when the socket is immediately available for writing a headers frame. This required a change to queuing to support pushing on the front of pending_send to ensure headers frames don't linger in pending_send. The alternative to this was adding a check to pending_send whether a new stream would exceed max concurrent. In that case, headers frames would need to carefully be reenqueued. This seemed to impose more complexity to ensure ordering of stream IDs would be maintained. 
Closes #704 Closes #706 Co-authored-by: Joe Wilm --- src/client.rs | 6 +- src/proto/streams/counts.rs | 8 +++ src/proto/streams/prioritize.rs | 18 ++++-- src/proto/streams/send.rs | 25 +++++--- src/proto/streams/store.rs | 42 +++++++++++- src/proto/streams/streams.rs | 14 ++-- tests/h2-tests/tests/client_request.rs | 88 ++++++++++++++++++++++++++ tests/h2-tests/tests/server.rs | 4 +- 8 files changed, 179 insertions(+), 26 deletions(-) diff --git a/src/client.rs b/src/client.rs index 4147e8a46..e83ef6a4a 100644 --- a/src/client.rs +++ b/src/client.rs @@ -510,8 +510,10 @@ where self.inner .send_request(request, end_of_stream, self.pending.as_ref()) .map_err(Into::into) - .map(|stream| { - if stream.is_pending_open() { + .map(|(stream, is_full)| { + if stream.is_pending_open() && is_full { + // Only prevent sending another request when the request queue + // is not full. self.pending = Some(stream.clone_to_opaque()); } diff --git a/src/proto/streams/counts.rs b/src/proto/streams/counts.rs index 6a5aa9ccd..add1312e5 100644 --- a/src/proto/streams/counts.rs +++ b/src/proto/streams/counts.rs @@ -49,6 +49,14 @@ impl Counts { } } + /// Returns true when the next opened stream will reach capacity of outbound streams + /// + /// The number of client send streams is incremented in prioritize; send_request has to guess if + /// it should wait before allowing another request to be sent. + pub fn next_send_stream_will_reach_capacity(&self) -> bool { + self.max_send_streams <= (self.num_send_streams + 1) + } + /// Returns the current peer pub fn peer(&self) -> peer::Dyn { self.peer diff --git a/src/proto/streams/prioritize.rs b/src/proto/streams/prioritize.rs index 35795fae4..3196049a4 100644 --- a/src/proto/streams/prioritize.rs +++ b/src/proto/streams/prioritize.rs @@ -520,7 +520,9 @@ impl Prioritize { tracing::trace!("poll_complete"); loop { - self.schedule_pending_open(store, counts); + if let Some(mut stream) = self.pop_pending_open(store, counts) { + self.pending_send.push_front(&mut stream); + } match self.pop_frame(buffer, store, max_frame_len, counts) { Some(frame) => { @@ -874,20 +876,24 @@ impl Prioritize { } } - fn schedule_pending_open(&mut self, store: &mut Store, counts: &mut Counts) { + fn pop_pending_open<'s>( + &mut self, + store: &'s mut Store, + counts: &mut Counts, + ) -> Option> { tracing::trace!("schedule_pending_open"); // check for any pending open streams - while counts.can_inc_num_send_streams() { + if counts.can_inc_num_send_streams() { if let Some(mut stream) = self.pending_open.pop(store) { tracing::trace!("schedule_pending_open; stream={:?}", stream.id); counts.inc_num_send_streams(&mut stream); - self.pending_send.push(&mut stream); stream.notify_send(); - } else { - return; + return Some(stream); } } + + None } } diff --git a/src/proto/streams/send.rs b/src/proto/streams/send.rs index dcb5225c7..626e61a33 100644 --- a/src/proto/streams/send.rs +++ b/src/proto/streams/send.rs @@ -143,22 +143,27 @@ impl Send { // Update the state stream.state.send_open(end_stream)?; - if counts.peer().is_local_init(frame.stream_id()) { - // If we're waiting on a PushPromise anyway - // handle potentially queueing the stream at that point - if !stream.is_pending_push { - if counts.can_inc_num_send_streams() { - counts.inc_num_send_streams(stream); - } else { - self.prioritize.queue_open(stream); - } - } + let mut pending_open = false; + if counts.peer().is_local_init(frame.stream_id()) && !stream.is_pending_push { + self.prioritize.queue_open(stream); + pending_open = true; } // Queue the 
frame for sending + // + // This call expects that, since new streams are in the open queue, new + // streams won't be pushed on pending_send. self.prioritize .queue_frame(frame.into(), buffer, stream, task); + // Need to notify the connection when pushing onto pending_open since + // queue_frame only notifies for pending_send. + if pending_open { + if let Some(task) = task.take() { + task.wake(); + } + } + Ok(()) } diff --git a/src/proto/streams/store.rs b/src/proto/streams/store.rs index d33a01cce..67b377b12 100644 --- a/src/proto/streams/store.rs +++ b/src/proto/streams/store.rs @@ -256,7 +256,7 @@ where /// /// If the stream is already contained by the list, return `false`. pub fn push(&mut self, stream: &mut store::Ptr) -> bool { - tracing::trace!("Queue::push"); + tracing::trace!("Queue::push_back"); if N::is_queued(stream) { tracing::trace!(" -> already queued"); @@ -292,6 +292,46 @@ where true } + /// Queue the stream + /// + /// If the stream is already contained by the list, return `false`. + pub fn push_front(&mut self, stream: &mut store::Ptr) -> bool { + tracing::trace!("Queue::push_front"); + + if N::is_queued(stream) { + tracing::trace!(" -> already queued"); + return false; + } + + N::set_queued(stream, true); + + // The next pointer shouldn't be set + debug_assert!(N::next(stream).is_none()); + + // Queue the stream + match self.indices { + Some(ref mut idxs) => { + tracing::trace!(" -> existing entries"); + + // Update the provided stream to point to the head node + let head_key = stream.resolve(idxs.head).key(); + N::set_next(stream, Some(head_key)); + + // Update the head pointer + idxs.head = stream.key(); + } + None => { + tracing::trace!(" -> first entry"); + self.indices = Some(store::Indices { + head: stream.key(), + tail: stream.key(), + }); + } + } + + true + } + pub fn pop<'a, R>(&mut self, store: &'a mut R) -> Option> where R: Resolve, diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index 02a0f61b6..274bf4553 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -216,7 +216,7 @@ where mut request: Request<()>, end_of_stream: bool, pending: Option<&OpaqueStreamRef>, - ) -> Result, SendError> { + ) -> Result<(StreamRef, bool), SendError> { use super::stream::ContentLength; use http::Method; @@ -298,10 +298,14 @@ where // the lock, so it can't. 
me.refs += 1; - Ok(StreamRef { - opaque: OpaqueStreamRef::new(self.inner.clone(), &mut stream), - send_buffer: self.send_buffer.clone(), - }) + let is_full = me.counts.next_send_stream_will_reach_capacity(); + Ok(( + StreamRef { + opaque: OpaqueStreamRef::new(self.inner.clone(), &mut stream), + send_buffer: self.send_buffer.clone(), + }, + is_full, + )) } pub(crate) fn is_extended_connect_protocol_enabled(&self) -> bool { diff --git a/tests/h2-tests/tests/client_request.rs b/tests/h2-tests/tests/client_request.rs index 258826d1d..7b4316004 100644 --- a/tests/h2-tests/tests/client_request.rs +++ b/tests/h2-tests/tests/client_request.rs @@ -239,6 +239,8 @@ async fn request_over_max_concurrent_streams_errors() { // first request is allowed let (resp1, mut stream1) = client.send_request(request, false).unwrap(); + // as long as we let the connection internals tick + client = h2.drive(client.ready()).await.unwrap(); let request = Request::builder() .method(Method::POST) @@ -284,6 +286,90 @@ async fn request_over_max_concurrent_streams_errors() { join(srv, h2).await; } +#[tokio::test] +async fn recv_decrement_max_concurrent_streams_when_requests_queued() { + h2_support::trace_init!(); + let (io, mut srv) = mock::new(); + + let srv = async move { + let settings = srv.assert_client_handshake().await; + assert_default_settings!(settings); + srv.recv_frame( + frames::headers(1) + .request("POST", "https://example.com/") + .eos(), + ) + .await; + srv.send_frame(frames::headers(1).response(200).eos()).await; + + srv.ping_pong([0; 8]).await; + + // limit this server later in life + srv.send_frame(frames::settings().max_concurrent_streams(1)) + .await; + srv.recv_frame(frames::settings_ack()).await; + srv.recv_frame( + frames::headers(3) + .request("POST", "https://example.com/") + .eos(), + ) + .await; + srv.ping_pong([1; 8]).await; + srv.send_frame(frames::headers(3).response(200).eos()).await; + + srv.recv_frame( + frames::headers(5) + .request("POST", "https://example.com/") + .eos(), + ) + .await; + srv.send_frame(frames::headers(5).response(200).eos()).await; + }; + + let h2 = async move { + let (mut client, mut h2) = client::handshake(io).await.expect("handshake"); + // we send a simple req here just to drive the connection so we can + // receive the server settings. 
+ let request = Request::builder() + .method(Method::POST) + .uri("https://example.com/") + .body(()) + .unwrap(); + // first request is allowed + let (response, _) = client.send_request(request, true).unwrap(); + h2.drive(response).await.unwrap(); + + let request = Request::builder() + .method(Method::POST) + .uri("https://example.com/") + .body(()) + .unwrap(); + + // first request is allowed + let (resp1, _) = client.send_request(request, true).unwrap(); + + let request = Request::builder() + .method(Method::POST) + .uri("https://example.com/") + .body(()) + .unwrap(); + + // second request is put into pending_open + let (resp2, _) = client.send_request(request, true).unwrap(); + + h2.drive(async move { + resp1.await.expect("req"); + }) + .await; + join(async move { h2.await.unwrap() }, async move { + resp2.await.unwrap() + }) + .await; + }; + + join(srv, h2).await; +} + #[tokio::test] async fn send_request_poll_ready_when_connection_error() { h2_support::trace_init!(); @@ -336,6 +422,8 @@ async fn send_request_poll_ready_when_connection_error() { // first request is allowed let (resp1, _) = client.send_request(request, true).unwrap(); + // as long as we let the connection internals tick + client = h2.drive(client.ready()).await.unwrap(); let request = Request::builder() .method(Method::POST) diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index 33e08c19d..6075c7dcf 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -296,10 +296,10 @@ async fn push_request_against_concurrency() { .await; client.recv_frame(frames::data(2, &b""[..]).eos()).await; client - .recv_frame(frames::headers(1).response(200).eos()) + .recv_frame(frames::headers(4).response(200).eos()) .await; client - .recv_frame(frames::headers(4).response(200).eos()) + .recv_frame(frames::headers(1).response(200).eos()) .await; }; From da38b1c49c39ccf7e2e0dca51ebf8505d6905597 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Mon, 21 Aug 2023 13:26:55 -0400 Subject: [PATCH 40/56] v0.3.21 --- CHANGELOG.md | 6 ++++++ Cargo.toml | 2 +- src/lib.rs | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9c035533c..9caebdf40 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +# 0.3.21 (August 21, 2023) + +* Fix opening of new streams over peer's max concurrent limit. +* Fix `RecvStream` to return data even if it has received a `CANCEL` stream error. +* Update MSRV to 1.63. + # 0.3.20 (June 26, 2023) * Fix panic if a server received a request with a `:status` pseudo header in the 1xx range. diff --git a/Cargo.toml b/Cargo.toml index f97b16bef..c815ff0e7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.20" +version = "0.3.21" license = "MIT" authors = [ "Carl Lerche ", diff --git a/src/lib.rs b/src/lib.rs index 1c5f57625..a37c8b4c1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! 
[`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.20")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.21")] #![deny( missing_debug_implementations, missing_docs, From 62cf7a606f2c681f62e47d3d04df551db4c010f4 Mon Sep 17 00:00:00 2001 From: tottoto Date: Sat, 16 Sep 2023 09:20:12 +0900 Subject: [PATCH 41/56] Check minimal versions more precisely --- .github/workflows/CI.yml | 19 ++++++++++--------- Cargo.toml | 2 +- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 1467d9fc7..ee920442e 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -61,10 +61,6 @@ jobs: run: ./ci/h2spec.sh if: matrix.rust == 'stable' - - name: Check minimal versions - run: cargo clean; cargo update -Zminimal-versions; cargo check - if: matrix.rust == 'nightly' - clippy_check: runs-on: ubuntu-latest steps: @@ -83,14 +79,19 @@ jobs: uses: actions/checkout@v3 - name: Get MSRV from package metadata - id: metadata - run: | - cargo metadata --no-deps --format-version 1 | - jq -r '"msrv=" + (.packages[] | select(.name == "h2")).rust_version' >> $GITHUB_OUTPUT + id: msrv + run: grep rust-version Cargo.toml | cut -d '"' -f2 | sed 's/^/version=/' >> $GITHUB_OUTPUT - name: Install Rust (${{ steps.metadata.outputs.msrv }}) + id: msrv-toolchain uses: dtolnay/rust-toolchain@master with: - toolchain: ${{ steps.metadata.outputs.msrv }} + toolchain: ${{ steps.msrv.outputs.version }} - run: cargo check -p h2 + + - uses: dtolnay/rust-toolchain@nightly + - uses: taiki-e/install-action@cargo-hack + - uses: taiki-e/install-action@cargo-minimal-versions + + - run: cargo +${{ steps.msrv-toolchain.outputs.name }} minimal-versions check diff --git a/Cargo.toml b/Cargo.toml index c815ff0e7..dd351bfa2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -48,7 +48,7 @@ tokio-util = { version = "0.7.1", features = ["codec"] } tokio = { version = "1", features = ["io-util"] } bytes = "1" http = "0.2" -tracing = { version = "0.1.21", default-features = false, features = ["std"] } +tracing = { version = "0.1.32", default-features = false, features = ["std"] } fnv = "1.0.5" slab = "0.4.2" indexmap = { version = "1.5.2", features = ["std"] } From a3f01c19fda400196897e07a1b7f6747e17562c7 Mon Sep 17 00:00:00 2001 From: Xiaoya Wei Date: Thu, 28 Sep 2023 22:45:42 +0800 Subject: [PATCH 42/56] perf: Improve throughput when vectored IO is not enabled (#712) As discussed in https://github.com/hyperium/h2/issues/711, the current implementation of sending data is suboptimal when vectored I/O is not enabled: data frame's head is likely to be sent in a separate TCP segment, whose payload is of only 9 bytes. This PR adds some specialized implementaton for non-vectored I/O case. In short, it sets a larget chain threhold, and also makes sure a data frame's head is sent along with the beginning part of the real data payload. All existing unit tests passed. Also I take a look at the e2e https://github.com/hyperium/hyper/blob/0.14.x/benches/end_to_end.rs but realize that all the benchmarks there are for the case of vectored I/O if the OS supports vectored I/O. There isn't a specific case for non-vectored I/O so I am not sure how to proceed with benchmark for performance evaluations. 
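
The coalescing step can be summarized with a small sketch. This is illustrative only (the helper name and signature are invented here); the real change lives in `Encoder::buffer` and the `poll_write_buf`-based write loop in the diff below. The idea: when the writer is not vectored, copy up to `chain_threshold` payload bytes into the same buffer as the 9-byte frame head, so the head is never flushed as a tiny write of its own.

```rust
use bytes::{Buf, BufMut, BytesMut};

// Illustrative sketch, not the actual Encoder code: keep the frame head and
// the first `chain_threshold` bytes of payload in one contiguous buffer.
fn buffer_head_with_payload<B: Buf>(
    buf: &mut BytesMut,
    head: [u8; 9],
    payload: &mut B,
    chain_threshold: usize,
) {
    buf.put_slice(&head);
    // Copy only enough payload to pad out the head; the remainder can still
    // be chained behind this buffer without copying when the frame is flushed.
    let take = chain_threshold.min(payload.remaining());
    buf.put(payload.take(take));
}
```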
--- Cargo.toml | 2 +- src/codec/framed_write.rs | 84 +++++++++++++++++---------------------- 2 files changed, 37 insertions(+), 49 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index dd351bfa2..b11981d26 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -44,7 +44,7 @@ members = [ futures-core = { version = "0.3", default-features = false } futures-sink = { version = "0.3", default-features = false } futures-util = { version = "0.3", default-features = false } -tokio-util = { version = "0.7.1", features = ["codec"] } +tokio-util = { version = "0.7.1", features = ["codec", "io"] } tokio = { version = "1", features = ["io-util"] } bytes = "1" http = "0.2" diff --git a/src/codec/framed_write.rs b/src/codec/framed_write.rs index 4b1b4accc..c88af02da 100644 --- a/src/codec/framed_write.rs +++ b/src/codec/framed_write.rs @@ -7,8 +7,9 @@ use bytes::{Buf, BufMut, BytesMut}; use std::pin::Pin; use std::task::{Context, Poll}; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; +use tokio_util::io::poll_write_buf; -use std::io::{self, Cursor, IoSlice}; +use std::io::{self, Cursor}; // A macro to get around a method needing to borrow &mut self macro_rules! limited_write_buf { @@ -45,8 +46,11 @@ struct Encoder { /// Max frame size, this is specified by the peer max_frame_size: FrameSize, - /// Whether or not the wrapped `AsyncWrite` supports vectored IO. - is_write_vectored: bool, + /// Chain payloads bigger than this. + chain_threshold: usize, + + /// Min buffer required to attempt to write a frame + min_buffer_capacity: usize, } #[derive(Debug)] @@ -61,14 +65,16 @@ enum Next { /// frame that big. const DEFAULT_BUFFER_CAPACITY: usize = 16 * 1_024; -/// Min buffer required to attempt to write a frame -const MIN_BUFFER_CAPACITY: usize = frame::HEADER_LEN + CHAIN_THRESHOLD; - -/// Chain payloads bigger than this. The remote will never advertise a max frame -/// size less than this (well, the spec says the max frame size can't be less -/// than 16kb, so not even close). +/// Chain payloads bigger than this when vectored I/O is enabled. The remote +/// will never advertise a max frame size less than this (well, the spec says +/// the max frame size can't be less than 16kb, so not even close). const CHAIN_THRESHOLD: usize = 256; +/// Chain payloads bigger than this when vectored I/O is **not** enabled. +/// A larger value in this scenario will reduce the number of small and +/// fragmented data being sent, and hereby improve the throughput. +const CHAIN_THRESHOLD_WITHOUT_VECTORED_IO: usize = 1024; + // TODO: Make generic impl FramedWrite where @@ -76,7 +82,11 @@ where B: Buf, { pub fn new(inner: T) -> FramedWrite { - let is_write_vectored = inner.is_write_vectored(); + let chain_threshold = if inner.is_write_vectored() { + CHAIN_THRESHOLD + } else { + CHAIN_THRESHOLD_WITHOUT_VECTORED_IO + }; FramedWrite { inner, encoder: Encoder { @@ -85,7 +95,8 @@ where next: None, last_data_frame: None, max_frame_size: frame::DEFAULT_MAX_FRAME_SIZE, - is_write_vectored, + chain_threshold, + min_buffer_capacity: chain_threshold + frame::HEADER_LEN, }, } } @@ -126,23 +137,17 @@ where Some(Next::Data(ref mut frame)) => { tracing::trace!(queued_data_frame = true); let mut buf = (&mut self.encoder.buf).chain(frame.payload_mut()); - ready!(write( - &mut self.inner, - self.encoder.is_write_vectored, - &mut buf, - cx, - ))? + ready!(poll_write_buf(Pin::new(&mut self.inner), cx, &mut buf))? 
} _ => { tracing::trace!(queued_data_frame = false); - ready!(write( - &mut self.inner, - self.encoder.is_write_vectored, - &mut self.encoder.buf, + ready!(poll_write_buf( + Pin::new(&mut self.inner), cx, + &mut self.encoder.buf ))? } - } + }; } match self.encoder.unset_frame() { @@ -165,30 +170,6 @@ where } } -fn write( - writer: &mut T, - is_write_vectored: bool, - buf: &mut B, - cx: &mut Context<'_>, -) -> Poll> -where - T: AsyncWrite + Unpin, - B: Buf, -{ - // TODO(eliza): when tokio-util 0.5.1 is released, this - // could just use `poll_write_buf`... - const MAX_IOVS: usize = 64; - let n = if is_write_vectored { - let mut bufs = [IoSlice::new(&[]); MAX_IOVS]; - let cnt = buf.chunks_vectored(&mut bufs); - ready!(Pin::new(writer).poll_write_vectored(cx, &bufs[..cnt]))? - } else { - ready!(Pin::new(writer).poll_write(cx, buf.chunk()))? - }; - buf.advance(n); - Ok(()).into() -} - #[must_use] enum ControlFlow { Continue, @@ -240,12 +221,17 @@ where return Err(PayloadTooBig); } - if len >= CHAIN_THRESHOLD { + if len >= self.chain_threshold { let head = v.head(); // Encode the frame head to the buffer head.encode(len, self.buf.get_mut()); + if self.buf.get_ref().remaining() < self.chain_threshold { + let extra_bytes = self.chain_threshold - self.buf.remaining(); + self.buf.get_mut().put(v.payload_mut().take(extra_bytes)); + } + // Save the data frame self.next = Some(Next::Data(v)); } else { @@ -305,7 +291,9 @@ where } fn has_capacity(&self) -> bool { - self.next.is_none() && self.buf.get_ref().remaining_mut() >= MIN_BUFFER_CAPACITY + self.next.is_none() + && (self.buf.get_ref().capacity() - self.buf.get_ref().len() + >= self.min_buffer_capacity) } fn is_empty(&self) -> bool { From 1f247de691ed7db8c10d3d21a0235af9a1757dd9 Mon Sep 17 00:00:00 2001 From: Dirkjan Ochtman Date: Mon, 9 Oct 2023 13:50:35 +0200 Subject: [PATCH 43/56] Update indexmap to version 2 (#698) * Update indexmap to version 2 * Update webpki-roots dev-dep to 0.25 * Update tokio-rustls dev-dep to 0.24 * Update env_logger dev-dep to 0.10 * Remove combined minimal-versions + MSRV check for now --- .github/workflows/CI.yml | 2 -- Cargo.toml | 8 ++++---- examples/akamai.rs | 2 +- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index ee920442e..4c1990673 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -93,5 +93,3 @@ jobs: - uses: dtolnay/rust-toolchain@nightly - uses: taiki-e/install-action@cargo-hack - uses: taiki-e/install-action@cargo-minimal-versions - - - run: cargo +${{ steps.msrv-toolchain.outputs.name }} minimal-versions check diff --git a/Cargo.toml b/Cargo.toml index b11981d26..70156498e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -51,7 +51,7 @@ http = "0.2" tracing = { version = "0.1.32", default-features = false, features = ["std"] } fnv = "1.0.5" slab = "0.4.2" -indexmap = { version = "1.5.2", features = ["std"] } +indexmap = { version = "2", features = ["std"] } [dev-dependencies] @@ -67,9 +67,9 @@ serde_json = "1.0.0" # Examples tokio = { version = "1", features = ["rt-multi-thread", "macros", "sync", "net"] } -env_logger = { version = "0.9", default-features = false } -tokio-rustls = "0.23.2" -webpki-roots = "0.22.2" +env_logger = { version = "0.10", default-features = false } +tokio-rustls = "0.24" +webpki-roots = "0.25" [package.metadata.docs.rs] features = ["stream"] diff --git a/examples/akamai.rs b/examples/akamai.rs index 1d0b17baf..788bf3005 100644 --- a/examples/akamai.rs +++ b/examples/akamai.rs @@ -17,7 +17,7 @@ pub 
async fn main() -> Result<(), Box> { let tls_client_config = std::sync::Arc::new({ let mut root_store = RootCertStore::empty(); - root_store.add_server_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.0.iter().map(|ta| { + root_store.add_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.iter().map(|ta| { OwnedTrustAnchor::from_subject_spki_name_constraints( ta.subject, ta.spki, From cbe7744c79a838ea91185500d8ede331eccc82c8 Mon Sep 17 00:00:00 2001 From: tottoto Date: Mon, 16 Oct 2023 22:32:33 +0900 Subject: [PATCH 44/56] chore(ci): update to actions/checkout@v4 (#716) --- .github/workflows/CI.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 4c1990673..f115c83bc 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@stable @@ -36,7 +36,7 @@ jobs: - stable steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install Rust (${{ matrix.rust }}) uses: dtolnay/rust-toolchain@master @@ -64,7 +64,7 @@ jobs: clippy_check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Run Clippy run: cargo clippy --all-targets --all-features @@ -76,7 +76,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Get MSRV from package metadata id: msrv From 05cf352ee35b15da6b3c6c68f18068e750dd9198 Mon Sep 17 00:00:00 2001 From: tottoto Date: Mon, 16 Oct 2023 19:29:03 +0900 Subject: [PATCH 45/56] chore(ci): add minimal versions checking on stable rust --- .github/workflows/CI.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index f115c83bc..14e4a3149 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -90,6 +90,13 @@ jobs: - run: cargo check -p h2 + minimal-versions: + runs-on: ubuntu-latest + needs: [style] + steps: + - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@nightly + - uses: dtolnay/rust-toolchain@stable - uses: taiki-e/install-action@cargo-hack - uses: taiki-e/install-action@cargo-minimal-versions + - run: cargo minimal-versions --ignore-private check From 3cdef9692ef75e4b1e42242bec8b29181847053e Mon Sep 17 00:00:00 2001 From: tottoto Date: Mon, 16 Oct 2023 19:30:51 +0900 Subject: [PATCH 46/56] fix(test): mark h2-support as private crate --- tests/h2-support/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/h2-support/Cargo.toml b/tests/h2-support/Cargo.toml index f178178eb..522d904cb 100644 --- a/tests/h2-support/Cargo.toml +++ b/tests/h2-support/Cargo.toml @@ -2,6 +2,7 @@ name = "h2-support" version = "0.1.0" authors = ["Carl Lerche "] +publish = false edition = "2018" [dependencies] From d03c54a80dad60a4f23e110eee227d24a413b21e Mon Sep 17 00:00:00 2001 From: tottoto Date: Mon, 16 Oct 2023 19:31:24 +0900 Subject: [PATCH 47/56] chore(dependencies): update tracing minimal version to 0.1.35 --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 70156498e..a567bf538 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -48,7 +48,7 @@ tokio-util = { version = "0.7.1", features = ["codec", "io"] } tokio = { version = "1", features = ["io-util"] } bytes = "1" http = "0.2" -tracing = { version = "0.1.32", default-features = false, features = ["std"] } +tracing = { version 
= "0.1.35", default-features = false, features = ["std"] } fnv = "1.0.5" slab = "0.4.2" indexmap = { version = "2", features = ["std"] } From 4aa7b163425648926454564aa4116ed6f20f9fee Mon Sep 17 00:00:00 2001 From: Protryon Date: Wed, 18 Oct 2023 09:29:40 -0700 Subject: [PATCH 48/56] Fix documentation for max_send_buffer_size (#718) --- src/client.rs | 2 +- src/server.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client.rs b/src/client.rs index e83ef6a4a..b329121ab 100644 --- a/src/client.rs +++ b/src/client.rs @@ -1023,7 +1023,7 @@ impl Builder { /// stream have been written to the connection, the send buffer capacity /// will be freed up again. /// - /// The default is currently ~400MB, but may change. + /// The default is currently ~400KB, but may change. /// /// # Panics /// diff --git a/src/server.rs b/src/server.rs index f1f4cf470..bb20adc5d 100644 --- a/src/server.rs +++ b/src/server.rs @@ -937,7 +937,7 @@ impl Builder { /// stream have been written to the connection, the send buffer capacity /// will be freed up again. /// - /// The default is currently ~400MB, but may change. + /// The default is currently ~400KB, but may change. /// /// # Panics /// From 56651e6e513597d105c5df37a5f5937e2ba50be6 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Mon, 30 Oct 2023 11:09:40 -0400 Subject: [PATCH 49/56] fix lint about unused import --- src/frame/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frame/mod.rs b/src/frame/mod.rs index 570a162a8..0e8e7035c 100644 --- a/src/frame/mod.rs +++ b/src/frame/mod.rs @@ -69,7 +69,7 @@ pub use crate::hpack::BytesStr; pub use self::settings::{ DEFAULT_INITIAL_WINDOW_SIZE, DEFAULT_MAX_FRAME_SIZE, DEFAULT_SETTINGS_HEADER_TABLE_SIZE, - MAX_INITIAL_WINDOW_SIZE, MAX_MAX_FRAME_SIZE, + MAX_MAX_FRAME_SIZE, }; pub type FrameSize = u32; From ef743ecb2243786c0573b9fe726290878359689b Mon Sep 17 00:00:00 2001 From: 4JX <4JXcYvmyu3Hz8fV@protonmail.com> Date: Mon, 30 Oct 2023 16:35:36 +0100 Subject: [PATCH 50/56] Add a setter for header_table_size (#638) --- src/client.rs | 33 +++++++++++++++++++++++++ src/codec/framed_read.rs | 6 +++++ src/codec/mod.rs | 5 ++++ src/frame/settings.rs | 2 -- src/proto/settings.rs | 4 +++ tests/h2-support/src/frames.rs | 5 ++++ tests/h2-tests/tests/client_request.rs | 34 ++++++++++++++++++++++++++ 7 files changed, 87 insertions(+), 2 deletions(-) diff --git a/src/client.rs b/src/client.rs index b329121ab..35cfc1414 100644 --- a/src/client.rs +++ b/src/client.rs @@ -1072,6 +1072,39 @@ impl Builder { self } + /// Sets the header table size. + /// + /// This setting informs the peer of the maximum size of the header compression + /// table used to encode header blocks, in octets. The encoder may select any value + /// equal to or less than the header table size specified by the sender. + /// + /// The default value is 4,096. + /// + /// # Examples + /// + /// ``` + /// # use tokio::io::{AsyncRead, AsyncWrite}; + /// # use h2::client::*; + /// # use bytes::Bytes; + /// # + /// # async fn doc(my_io: T) + /// # -> Result<((SendRequest, Connection)), h2::Error> + /// # { + /// // `client_fut` is a future representing the completion of the HTTP/2 + /// // handshake. 
+ /// let client_fut = Builder::new() + /// .header_table_size(1_000_000) + /// .handshake(my_io); + /// # client_fut.await + /// # } + /// # + /// # pub fn main() {} + /// ``` + pub fn header_table_size(&mut self, size: u32) -> &mut Self { + self.settings.set_header_table_size(Some(size)); + self + } + /// Sets the first stream ID to something other than 1. #[cfg(feature = "unstable")] pub fn initial_stream_id(&mut self, stream_id: u32) -> &mut Self { diff --git a/src/codec/framed_read.rs b/src/codec/framed_read.rs index a874d7732..3b0030d93 100644 --- a/src/codec/framed_read.rs +++ b/src/codec/framed_read.rs @@ -88,6 +88,12 @@ impl FramedRead { pub fn set_max_header_list_size(&mut self, val: usize) { self.max_header_list_size = val; } + + /// Update the header table size setting. + #[inline] + pub fn set_header_table_size(&mut self, val: usize) { + self.hpack.queue_size_update(val); + } } /// Decodes a frame. diff --git a/src/codec/mod.rs b/src/codec/mod.rs index 359adf6e4..6cbdc1e18 100644 --- a/src/codec/mod.rs +++ b/src/codec/mod.rs @@ -95,6 +95,11 @@ impl Codec { self.framed_write().set_header_table_size(val) } + /// Set the decoder header table size size. + pub fn set_recv_header_table_size(&mut self, val: usize) { + self.inner.set_header_table_size(val) + } + /// Set the max header list size that can be received. pub fn set_max_recv_header_list_size(&mut self, val: usize) { self.inner.set_max_header_list_size(val); diff --git a/src/frame/settings.rs b/src/frame/settings.rs index 0c913f059..484498a9d 100644 --- a/src/frame/settings.rs +++ b/src/frame/settings.rs @@ -121,11 +121,9 @@ impl Settings { self.header_table_size } - /* pub fn set_header_table_size(&mut self, size: Option) { self.header_table_size = size; } - */ pub fn load(head: Head, payload: &[u8]) -> Result { use self::Setting::*; diff --git a/src/proto/settings.rs b/src/proto/settings.rs index 6cc617209..28065cc68 100644 --- a/src/proto/settings.rs +++ b/src/proto/settings.rs @@ -60,6 +60,10 @@ impl Settings { codec.set_max_recv_header_list_size(max as usize); } + if let Some(val) = local.header_table_size() { + codec.set_recv_header_table_size(val as usize); + } + streams.apply_local_settings(local)?; self.local = Local::Synced; Ok(()) diff --git a/tests/h2-support/src/frames.rs b/tests/h2-support/src/frames.rs index d302d3ce5..a76dd3b60 100644 --- a/tests/h2-support/src/frames.rs +++ b/tests/h2-support/src/frames.rs @@ -391,6 +391,11 @@ impl Mock { self.0.set_enable_connect_protocol(Some(val)); self } + + pub fn header_table_size(mut self, val: u32) -> Self { + self.0.set_header_table_size(Some(val)); + self + } } impl From> for frame::Settings { diff --git a/tests/h2-tests/tests/client_request.rs b/tests/h2-tests/tests/client_request.rs index 7b4316004..88c7df464 100644 --- a/tests/h2-tests/tests/client_request.rs +++ b/tests/h2-tests/tests/client_request.rs @@ -1627,6 +1627,40 @@ async fn rogue_server_reused_headers() { join(srv, h2).await; } +#[tokio::test] +async fn client_builder_header_table_size() { + h2_support::trace_init!(); + let (io, mut srv) = mock::new(); + let mut settings = frame::Settings::default(); + + settings.set_header_table_size(Some(10000)); + + let srv = async move { + let recv_settings = srv.assert_client_handshake().await; + assert_frame_eq(recv_settings, settings); + + srv.recv_frame( + frames::headers(1) + .request("GET", "https://example.com/") + .eos(), + ) + .await; + srv.send_frame(frames::headers(1).response(200).eos()).await; + }; + + let mut builder = client::Builder::new(); + 
builder.header_table_size(10000); + + let h2 = async move { + let (mut client, mut h2) = builder.handshake::<_, Bytes>(io).await.unwrap(); + let request = Request::get("https://example.com/").body(()).unwrap(); + let (response, _) = client.send_request(request, true).unwrap(); + h2.drive(response).await.unwrap(); + }; + + join(srv, h2).await; +} + const SETTINGS: &[u8] = &[0, 0, 0, 4, 0, 0, 0, 0, 0]; const SETTINGS_ACK: &[u8] = &[0, 0, 0, 4, 1, 0, 0, 0, 0]; From c7ca62f69b3b16d66f088ed2684f4534a8034c76 Mon Sep 17 00:00:00 2001 From: vuittont60 <81072379+vuittont60@users.noreply.github.com> Date: Tue, 7 Nov 2023 19:15:22 +0800 Subject: [PATCH 51/56] docs: fix typos (#724) --- CONTRIBUTING.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 10e74bf29..8af0abcc7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -5,7 +5,7 @@ ## Getting Help ## If you have a question about the h2 library or have encountered problems using it, you may -[file an issue][issue] or ask ask a question on the [Tokio Gitter][gitter]. +[file an issue][issue] or ask a question on the [Tokio Gitter][gitter]. ## Submitting a Pull Request ## @@ -15,7 +15,7 @@ Do you have an improvement? 2. We will try to respond to your issue promptly. 3. Fork this repo, develop and test your code changes. See the project's [README](README.md) for further information about working in this repository. 4. Submit a pull request against this repo's `master` branch. -6. Your branch may be merged once all configured checks pass, including: +5. Your branch may be merged once all configured checks pass, including: - Code review has been completed. - The branch has passed tests in CI. From 0f412d8b9c8d309966197873ad1d065adc23c794 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 15 Nov 2023 09:00:16 -0500 Subject: [PATCH 52/56] v0.3.22 --- CHANGELOG.md | 6 ++++++ Cargo.toml | 2 +- src/lib.rs | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9caebdf40..00d69725a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +# 0.3.22 (November 15, 2023) + +* Add `header_table_size(usize)` option to client and server builders. +* Improve throughput when vectored IO is not available. +* Update indexmap to 2. + # 0.3.21 (August 21, 2023) * Fix opening of new streams over peer's max concurrent limit. diff --git a/Cargo.toml b/Cargo.toml index a567bf538..c413e56ce 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.21" +version = "0.3.22" license = "MIT" authors = [ "Carl Lerche ", diff --git a/src/lib.rs b/src/lib.rs index a37c8b4c1..a1fde6eb4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! [`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.21")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.22")] #![deny( missing_debug_implementations, missing_docs, From b668c7fbe22e0cb4a76b0a67498cbb4d0aacbc75 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 10 Jan 2024 15:36:23 -0500 Subject: [PATCH 53/56] fix: streams awaiting capacity lockout (#730) (#734) This PR changes the the assign-capacity queue to prioritize streams that are send-ready. This is necessary to prevent a lockout when streams aren't able to proceed while waiting for connection capacity, but there is none. 
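
To make the failure mode concrete, the kind of client workload that could previously lock up looks roughly like the sketch below. Names and sizes are illustrative, driving the connection task is omitted, and the real regression test is `stream_count_over_max_stream_limit_does_not_starve_capacity` further down: several requests are queued past the peer's `max_concurrent_streams` while earlier streams already hold the entire connection window.

```rust
use bytes::Bytes;
use http::Request;

// Illustrative only: queue more requests than the peer allows concurrently,
// each wanting a full 64 KiB connection window. Before this fix, streams
// still waiting to open could be handed connection capacity and stall the
// streams that were actually allowed to send.
fn queue_past_limit(
    client: &mut h2::client::SendRequest<Bytes>,
) -> Result<(), h2::Error> {
    for _ in 0..8 {
        let request = Request::post("https://example.com/").body(()).unwrap();
        let (_response, mut send) = client.send_request(request, false)?;
        send.send_data(Bytes::from(vec![0u8; 65_535]), true)?;
    }
    Ok(())
}
```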
Closes https://github.com/hyperium/hyper/issues/3338 Co-authored-by: dswij --- src/error.rs | 1 + src/proto/streams/prioritize.rs | 11 ++- tests/h2-tests/tests/prioritization.rs | 97 +++++++++++++++++++++++++- 3 files changed, 106 insertions(+), 3 deletions(-) diff --git a/src/error.rs b/src/error.rs index eb2b2acbc..96a471bcb 100644 --- a/src/error.rs +++ b/src/error.rs @@ -25,6 +25,7 @@ pub struct Error { #[derive(Debug)] enum Kind { /// A RST_STREAM frame was received or sent. + #[allow(dead_code)] Reset(StreamId, Reason, Initiator), /// A GO_AWAY frame was received or sent. diff --git a/src/proto/streams/prioritize.rs b/src/proto/streams/prioritize.rs index 3196049a4..999bb0759 100644 --- a/src/proto/streams/prioritize.rs +++ b/src/proto/streams/prioritize.rs @@ -184,7 +184,15 @@ impl Prioritize { stream.requested_send_capacity = cmp::min(stream.buffered_send_data, WindowSize::MAX as usize) as WindowSize; - self.try_assign_capacity(stream); + // `try_assign_capacity` will queue the stream to `pending_capacity` if the capcaity + // cannot be assigned at the time it is called. + // + // Streams over the max concurrent count will still call `send_data` so we should be + // careful not to put it into `pending_capacity` as it will starve the connection + // capacity for other streams + if !stream.is_pending_open { + self.try_assign_capacity(stream); + } } if frame.is_end_stream() { @@ -522,6 +530,7 @@ impl Prioritize { loop { if let Some(mut stream) = self.pop_pending_open(store, counts) { self.pending_send.push_front(&mut stream); + self.try_assign_capacity(&mut stream); } match self.pop_frame(buffer, store, max_frame_len, counts) { diff --git a/tests/h2-tests/tests/prioritization.rs b/tests/h2-tests/tests/prioritization.rs index 7c2681068..11d2c2ccf 100644 --- a/tests/h2-tests/tests/prioritization.rs +++ b/tests/h2-tests/tests/prioritization.rs @@ -1,5 +1,6 @@ -use futures::future::join; -use futures::{FutureExt, StreamExt}; +use futures::future::{join, select}; +use futures::{pin_mut, FutureExt, StreamExt}; + use h2_support::prelude::*; use h2_support::DEFAULT_WINDOW_SIZE; use std::task::Context; @@ -408,3 +409,95 @@ async fn send_data_receive_window_update() { join(mock, h2).await; } + +#[tokio::test] +async fn stream_count_over_max_stream_limit_does_not_starve_capacity() { + use tokio::sync::oneshot; + + h2_support::trace_init!(); + + let (io, mut srv) = mock::new(); + + let (tx, rx) = oneshot::channel(); + + let srv = async move { + let _ = srv + .assert_client_handshake_with_settings( + frames::settings() + // super tiny server + .max_concurrent_streams(1), + ) + .await; + srv.recv_frame(frames::headers(1).request("POST", "http://example.com/")) + .await; + + srv.recv_frame(frames::data(1, vec![0; 16384])).await; + srv.recv_frame(frames::data(1, vec![0; 16384])).await; + srv.recv_frame(frames::data(1, vec![0; 16384])).await; + srv.recv_frame(frames::data(1, vec![0; 16383]).eos()).await; + srv.send_frame(frames::headers(1).response(200).eos()).await; + + // All of these connection capacities should be assigned to stream 3 + srv.send_frame(frames::window_update(0, 16384)).await; + srv.send_frame(frames::window_update(0, 16384)).await; + srv.send_frame(frames::window_update(0, 16384)).await; + srv.send_frame(frames::window_update(0, 16383)).await; + + // StreamId(3) should be able to send all of its request with the conn capacity + srv.recv_frame(frames::headers(3).request("POST", "http://example.com/")) + .await; + srv.recv_frame(frames::data(3, vec![0; 16384])).await; + 
srv.recv_frame(frames::data(3, vec![0; 16384])).await; + srv.recv_frame(frames::data(3, vec![0; 16384])).await; + srv.recv_frame(frames::data(3, vec![0; 16383]).eos()).await; + srv.send_frame(frames::headers(3).response(200).eos()).await; + + // Then all the future stream is guaranteed to be send-able by induction + tx.send(()).unwrap(); + }; + + fn request() -> Request<()> { + Request::builder() + .method(Method::POST) + .uri("http://example.com/") + .body(()) + .unwrap() + } + + let client = async move { + let (mut client, mut conn) = client::Builder::new() + .handshake::<_, Bytes>(io) + .await + .expect("handshake"); + + let (req1, mut send1) = client.send_request(request(), false).unwrap(); + let (req2, mut send2) = client.send_request(request(), false).unwrap(); + + // Use up the connection window. + send1.send_data(vec![0; 65535].into(), true).unwrap(); + // Queue up for more connection window. + send2.send_data(vec![0; 65535].into(), true).unwrap(); + + // Queue up more pending open streams + for _ in 0..5 { + let (_, mut send) = client.send_request(request(), false).unwrap(); + send.send_data(vec![0; 65535].into(), true).unwrap(); + } + + let response = conn.drive(req1).await.unwrap(); + assert_eq!(response.status(), StatusCode::OK); + + let response = conn.drive(req2).await.unwrap(); + assert_eq!(response.status(), StatusCode::OK); + + let _ = rx.await; + }; + + let task = join(srv, client); + pin_mut!(task); + + let t = tokio::time::sleep(Duration::from_secs(5)).map(|_| panic!("time out")); + pin_mut!(t); + + select(task, t).await; +} From a7eb14a487c0094187314fca63cfe4de4d3d78ef Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 10 Jan 2024 15:37:50 -0500 Subject: [PATCH 54/56] v0.3.23 --- CHANGELOG.md | 4 ++++ Cargo.toml | 2 +- src/lib.rs | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 00d69725a..94a2dac20 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.3.23 (January 10, 2024) + +* Backport fix from 0.4.1 for stream capacity assignment. + # 0.3.22 (November 15, 2023) * Add `header_table_size(usize)` option to client and server builders. diff --git a/Cargo.toml b/Cargo.toml index c413e56ce..077141a0f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.22" +version = "0.3.23" license = "MIT" authors = [ "Carl Lerche ", diff --git a/src/lib.rs b/src/lib.rs index a1fde6eb4..cb7028f9b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! [`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.22")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.23")] #![deny( missing_debug_implementations, missing_docs, From d919cd6fd8e0f4f5d1f6282fab0b38a1b4bf999c Mon Sep 17 00:00:00 2001 From: Noah Kennedy Date: Wed, 10 Jan 2024 13:37:11 -0600 Subject: [PATCH 55/56] streams: limit error resets for misbehaving connections This change causes GOAWAYs to be issued to misbehaving connections which for one reason or another cause us to emit lots of error resets. Error resets are not generally expected from valid implementations anyways. The threshold after which we issue GOAWAYs is tunable, and will default to 1024. 
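
As a usage sketch (the builder method and the default of 1024 come from the diff below; the limit of 128 and the function name are only an example), a server that wants a tighter budget than the default can set it when building the connection:

```rust
use h2::server;

// Example only: build a server Builder with a stricter local-error-reset
// budget than the default of 1024. Passing `None` disables the protection
// entirely and is not recommended.
fn hardened_builder() -> server::Builder {
    let mut builder = server::Builder::new();
    builder.max_local_error_reset_streams(Some(128));
    builder
}
```

The builder is then used with `handshake(io)` as usual, and the client `Builder` gains the same `max_local_error_reset_streams` option.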
--- src/client.rs | 25 +++++++++++++++++++++++++ src/proto/connection.rs | 2 ++ src/proto/mod.rs | 1 + src/proto/streams/counts.rs | 32 ++++++++++++++++++++++++++++++++ src/proto/streams/mod.rs | 6 ++++++ src/proto/streams/streams.rs | 22 ++++++++++++++++++---- src/server.rs | 29 +++++++++++++++++++++++++++++ 7 files changed, 113 insertions(+), 4 deletions(-) diff --git a/src/client.rs b/src/client.rs index 35cfc1414..fe78767f7 100644 --- a/src/client.rs +++ b/src/client.rs @@ -336,6 +336,12 @@ pub struct Builder { /// The stream ID of the first (lowest) stream. Subsequent streams will use /// monotonically increasing stream IDs. stream_id: StreamId, + + /// Maximum number of locally reset streams due to protocol error across + /// the lifetime of the connection. + /// + /// When this gets exceeded, we issue GOAWAYs. + local_max_error_reset_streams: Option, } #[derive(Debug)] @@ -645,6 +651,7 @@ impl Builder { initial_max_send_streams: usize::MAX, settings: Default::default(), stream_id: 1.into(), + local_max_error_reset_streams: Some(proto::DEFAULT_LOCAL_RESET_COUNT_MAX), } } @@ -973,6 +980,23 @@ impl Builder { self } + /// Sets the maximum number of local resets due to protocol errors made by the remote end. + /// + /// Invalid frames and many other protocol errors will lead to resets being generated for those streams. + /// Too many of these often indicate a malicious client, and there are attacks which can abuse this to DOS servers. + /// This limit protects against these DOS attacks by limiting the amount of resets we can be forced to generate. + /// + /// When the number of local resets exceeds this threshold, the client will close the connection. + /// + /// If you really want to disable this, supply [`Option::None`] here. + /// Disabling this is not recommended and may expose you to DOS attacks. + /// + /// The default value is currently 1024, but could change. + pub fn max_local_error_reset_streams(&mut self, max: Option) -> &mut Self { + self.local_max_error_reset_streams = max; + self + } + /// Sets the maximum number of pending-accept remotely-reset streams. 
/// /// Streams that have been received by the peer, but not accepted by the @@ -1293,6 +1317,7 @@ where reset_stream_duration: builder.reset_stream_duration, reset_stream_max: builder.reset_stream_max, remote_reset_stream_max: builder.pending_accept_reset_stream_max, + local_error_reset_streams_max: builder.local_max_error_reset_streams, settings: builder.settings.clone(), }, ); diff --git a/src/proto/connection.rs b/src/proto/connection.rs index 637fac358..5d6b9d2b1 100644 --- a/src/proto/connection.rs +++ b/src/proto/connection.rs @@ -81,6 +81,7 @@ pub(crate) struct Config { pub reset_stream_duration: Duration, pub reset_stream_max: usize, pub remote_reset_stream_max: usize, + pub local_error_reset_streams_max: Option, pub settings: frame::Settings, } @@ -125,6 +126,7 @@ where .settings .max_concurrent_streams() .map(|max| max as usize), + local_max_error_reset_streams: config.local_error_reset_streams_max, } } let streams = Streams::new(streams_config(&config)); diff --git a/src/proto/mod.rs b/src/proto/mod.rs index 567d03060..560927598 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -32,6 +32,7 @@ pub type WindowSize = u32; // Constants pub const MAX_WINDOW_SIZE: WindowSize = (1 << 31) - 1; // i32::MAX as u32 pub const DEFAULT_REMOTE_RESET_STREAM_MAX: usize = 20; +pub const DEFAULT_LOCAL_RESET_COUNT_MAX: usize = 1024; pub const DEFAULT_RESET_STREAM_MAX: usize = 10; pub const DEFAULT_RESET_STREAM_SECS: u64 = 30; pub const DEFAULT_MAX_SEND_BUFFER_SIZE: usize = 1024 * 400; diff --git a/src/proto/streams/counts.rs b/src/proto/streams/counts.rs index add1312e5..710d42c8d 100644 --- a/src/proto/streams/counts.rs +++ b/src/proto/streams/counts.rs @@ -31,6 +31,16 @@ pub(super) struct Counts { /// Current number of "pending accept" streams that were remotely reset num_remote_reset_streams: usize, + + /// Maximum number of locally reset streams due to protocol error across + /// the lifetime of the connection. + /// + /// When this gets exceeded, we issue GOAWAYs. + max_local_error_reset_streams: Option, + + /// Total number of locally reset streams due to protocol error across the + /// lifetime of the connection. + num_local_error_reset_streams: usize, } impl Counts { @@ -46,6 +56,8 @@ impl Counts { num_local_reset_streams: 0, max_remote_reset_streams: config.remote_reset_max, num_remote_reset_streams: 0, + max_local_error_reset_streams: config.local_max_error_reset_streams, + num_local_error_reset_streams: 0, } } @@ -66,6 +78,26 @@ impl Counts { self.num_send_streams != 0 || self.num_recv_streams != 0 } + /// Returns true if we can issue another local reset due to protocol error. 
+ pub fn can_inc_num_local_error_resets(&self) -> bool { + if let Some(max) = self.max_local_error_reset_streams { + max > self.num_local_error_reset_streams + } else { + true + } + } + + pub fn inc_num_local_error_resets(&mut self) { + assert!(self.can_inc_num_local_error_resets()); + + // Increment the number of remote initiated streams + self.num_local_error_reset_streams += 1; + } + + pub(crate) fn max_local_error_resets(&self) -> Option { + self.max_local_error_reset_streams + } + /// Returns true if the receive stream concurrency can be incremented pub fn can_inc_num_recv_streams(&self) -> bool { self.max_recv_streams > self.num_recv_streams diff --git a/src/proto/streams/mod.rs b/src/proto/streams/mod.rs index fbe32c7b0..b347442af 100644 --- a/src/proto/streams/mod.rs +++ b/src/proto/streams/mod.rs @@ -69,4 +69,10 @@ pub struct Config { /// Maximum number of remote initiated streams pub remote_max_initiated: Option, + + /// Maximum number of locally reset streams due to protocol error across + /// the lifetime of the connection. + /// + /// When this gets exceeded, we issue GOAWAYs. + pub local_max_error_reset_streams: Option, } diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index 274bf4553..7c00cd517 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -1542,10 +1542,24 @@ impl Actions { ) -> Result<(), Error> { if let Err(Error::Reset(stream_id, reason, initiator)) = res { debug_assert_eq!(stream_id, stream.id); - // Reset the stream. - self.send - .send_reset(reason, initiator, buffer, stream, counts, &mut self.task); - Ok(()) + + if counts.can_inc_num_local_error_resets() { + counts.inc_num_local_error_resets(); + + // Reset the stream. + self.send + .send_reset(reason, initiator, buffer, stream, counts, &mut self.task); + Ok(()) + } else { + tracing::warn!( + "reset_on_recv_stream_err; locally-reset streams reached limit ({:?})", + counts.max_local_error_resets().unwrap(), + ); + Err(Error::library_go_away_data( + Reason::ENHANCE_YOUR_CALM, + "too_many_internal_resets", + )) + } } else { res } diff --git a/src/server.rs b/src/server.rs index bb20adc5d..3c945726b 100644 --- a/src/server.rs +++ b/src/server.rs @@ -252,6 +252,12 @@ pub struct Builder { /// Maximum amount of bytes to "buffer" for writing per stream. max_send_buffer_size: usize, + + /// Maximum number of locally reset streams due to protocol error across + /// the lifetime of the connection. + /// + /// When this gets exceeded, we issue GOAWAYs. + local_max_error_reset_streams: Option, } /// Send a response back to the client @@ -650,6 +656,8 @@ impl Builder { settings: Settings::default(), initial_target_connection_window_size: None, max_send_buffer_size: proto::DEFAULT_MAX_SEND_BUFFER_SIZE, + + local_max_error_reset_streams: Some(proto::DEFAULT_LOCAL_RESET_COUNT_MAX), } } @@ -887,6 +895,24 @@ impl Builder { self } + /// Sets the maximum number of local resets due to protocol errors made by the remote end. + /// + /// Invalid frames and many other protocol errors will lead to resets being generated for those streams. + /// Too many of these often indicate a malicious client, and there are attacks which can abuse this to DOS servers. + /// This limit protects against these DOS attacks by limiting the amount of resets we can be forced to generate. + /// + /// When the number of local resets exceeds this threshold, the server will issue GOAWAYs with an error code of + /// `ENHANCE_YOUR_CALM` to the client. 
+ /// + /// If you really want to disable this, supply [`Option::None`] here. + /// Disabling this is not recommended and may expose you to DOS attacks. + /// + /// The default value is currently 1024, but could change. + pub fn max_local_error_reset_streams(&mut self, max: Option) -> &mut Self { + self.local_max_error_reset_streams = max; + self + } + /// Sets the maximum number of pending-accept remotely-reset streams. /// /// Streams that have been received by the peer, but not accepted by the @@ -1361,6 +1387,9 @@ where reset_stream_duration: self.builder.reset_stream_duration, reset_stream_max: self.builder.reset_stream_max, remote_reset_stream_max: self.builder.pending_accept_reset_stream_max, + local_error_reset_streams_max: self + .builder + .local_max_error_reset_streams, settings: self.builder.settings.clone(), }, ); From 7243ab5854b2375213a5a2cdfd543f1d669661e2 Mon Sep 17 00:00:00 2001 From: Noah Kennedy Date: Wed, 17 Jan 2024 13:48:44 -0600 Subject: [PATCH 56/56] Prepare v0.3.24 --- CHANGELOG.md | 4 ++++ Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 94a2dac20..dbd1cbed9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.3.24 (January 17, 2024) + +* Limit error resets for misbehaving connections. + # 0.3.23 (January 10, 2024) * Backport fix from 0.4.1 for stream capacity assignment. diff --git a/Cargo.toml b/Cargo.toml index 077141a0f..d0b70970f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.23" +version = "0.3.24" license = "MIT" authors = [ "Carl Lerche ",