Bug 1752117 - Neqo version 0.5.7 r=necko-reviewers,valentin

Differential Revision: https://phabricator.services.mozilla.com/D137254
This commit is contained in:
Dragana Damjanovic 2022-01-31 05:17:42 +00:00
parent bfba4ec9f5
commit 2185d4ea34
95 changed files with 6019 additions and 1655 deletions

View file

@ -15,7 +15,7 @@ rev = "029ac0d54b237f27dc7d8d4e51bc0fb076e5e852"
[source."https://github.com/mozilla/neqo"]
git = "https://github.com/mozilla/neqo"
replace-with = "vendored-sources"
tag = "v0.5.6"
tag = "v0.5.7"
[source."https://github.com/mozilla/mp4parse-rust"]
git = "https://github.com/mozilla/mp4parse-rust"

44
Cargo.lock generated
View file

@ -1217,6 +1217,7 @@ dependencies = [
"ident_case",
"proc-macro2",
"quote",
"strsim",
"syn",
]
@ -1439,6 +1440,27 @@ dependencies = [
"packed_simd",
]
[[package]]
name = "enumset"
version = "1.0.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6216d2c19a6fb5f29d1ada1dc7bc4367a8cbf0fa4af5cf12e07b5bbdde6b5b2c"
dependencies = [
"enumset_derive",
]
[[package]]
name = "enumset_derive"
version = "0.5.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6451128aa6655d880755345d085494cf7561a6bee7c8dc821e5d77e6d267ecd4"
dependencies = [
"darling",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "env_logger"
version = "0.8.4"
@ -3408,8 +3430,8 @@ dependencies = [
[[package]]
name = "neqo-common"
version = "0.5.6"
source = "git+https://github.com/mozilla/neqo?tag=v0.5.6#b9b6e1d4f459b5fab2c59aa784639a3e9b18538e"
version = "0.5.7"
source = "git+https://github.com/mozilla/neqo?tag=v0.5.7#f3de275b12c40f45718ce43a0482e771ba6cd4b8"
dependencies = [
"chrono",
"env_logger",
@ -3421,11 +3443,12 @@ dependencies = [
[[package]]
name = "neqo-crypto"
version = "0.5.6"
source = "git+https://github.com/mozilla/neqo?tag=v0.5.6#b9b6e1d4f459b5fab2c59aa784639a3e9b18538e"
version = "0.5.7"
source = "git+https://github.com/mozilla/neqo?tag=v0.5.7#f3de275b12c40f45718ce43a0482e771ba6cd4b8"
dependencies = [
"bindgen",
"log",
"mozbuild",
"neqo-common",
"serde",
"serde_derive",
@ -3434,9 +3457,10 @@ dependencies = [
[[package]]
name = "neqo-http3"
version = "0.5.6"
source = "git+https://github.com/mozilla/neqo?tag=v0.5.6#b9b6e1d4f459b5fab2c59aa784639a3e9b18538e"
version = "0.5.7"
source = "git+https://github.com/mozilla/neqo?tag=v0.5.7#f3de275b12c40f45718ce43a0482e771ba6cd4b8"
dependencies = [
"enumset",
"lazy_static",
"log",
"neqo-common",
@ -3451,8 +3475,8 @@ dependencies = [
[[package]]
name = "neqo-qpack"
version = "0.5.6"
source = "git+https://github.com/mozilla/neqo?tag=v0.5.6#b9b6e1d4f459b5fab2c59aa784639a3e9b18538e"
version = "0.5.7"
source = "git+https://github.com/mozilla/neqo?tag=v0.5.7#f3de275b12c40f45718ce43a0482e771ba6cd4b8"
dependencies = [
"lazy_static",
"log",
@ -3465,8 +3489,8 @@ dependencies = [
[[package]]
name = "neqo-transport"
version = "0.5.6"
source = "git+https://github.com/mozilla/neqo?tag=v0.5.6#b9b6e1d4f459b5fab2c59aa784639a3e9b18538e"
version = "0.5.7"
source = "git+https://github.com/mozilla/neqo?tag=v0.5.7#f3de275b12c40f45718ce43a0482e771ba6cd4b8"
dependencies = [
"indexmap",
"lazy_static",

View file

@ -8,10 +8,10 @@ edition = "2018"
name = "neqo_glue"
[dependencies]
neqo-http3 = { tag = "v0.5.6", git = "https://github.com/mozilla/neqo" }
neqo-transport = { tag = "v0.5.6", git = "https://github.com/mozilla/neqo" }
neqo-common = { tag = "v0.5.6", git = "https://github.com/mozilla/neqo" }
neqo-qpack = { tag = "v0.5.6", git = "https://github.com/mozilla/neqo" }
neqo-http3 = { tag = "v0.5.7", git = "https://github.com/mozilla/neqo" }
neqo-transport = { tag = "v0.5.7", git = "https://github.com/mozilla/neqo" }
neqo-common = { tag = "v0.5.7", git = "https://github.com/mozilla/neqo" }
neqo-qpack = { tag = "v0.5.7", git = "https://github.com/mozilla/neqo" }
nserror = { path = "../../../xpcom/rust/nserror" }
nsstring = { path = "../../../xpcom/rust/nsstring" }
xpcom = { path = "../../../xpcom/rust/xpcom" }
@ -25,7 +25,7 @@ static_prefs = { path = "../../../modules/libpref/init/static_prefs", optional =
winapi = {version = "0.3", features = ["ws2def"] }
[dependencies.neqo-crypto]
tag = "v0.5.6"
tag = "v0.5.7"
git = "https://github.com/mozilla/neqo"
default-features = false
features = ["gecko"]

View file

@ -5,17 +5,17 @@ authors = ["Dragana Damjanovic <dragana.damjano@gmail.com>"]
edition = "2018"
[dependencies]
neqo-transport = { tag = "v0.5.6", git = "https://github.com/mozilla/neqo" }
neqo-common = { tag = "v0.5.6", git = "https://github.com/mozilla/neqo" }
neqo-http3 = { tag = "v0.5.6", git = "https://github.com/mozilla/neqo" }
neqo-qpack = { tag = "v0.5.6", git = "https://github.com/mozilla/neqo" }
neqo-transport = { tag = "v0.5.7", git = "https://github.com/mozilla/neqo" }
neqo-common = { tag = "v0.5.7", git = "https://github.com/mozilla/neqo" }
neqo-http3 = { tag = "v0.5.7", git = "https://github.com/mozilla/neqo" }
neqo-qpack = { tag = "v0.5.7", git = "https://github.com/mozilla/neqo" }
mio = "0.6.17"
mio-extras = "2.0.5"
log = "0.4.0"
base64 = "0.10"
[dependencies.neqo-crypto]
tag = "v0.5.6"
tag = "v0.5.7"
git = "https://github.com/mozilla/neqo"
default-features = false
features = ["gecko"]

View file

@ -0,0 +1 @@
{"files":{"Cargo.toml":"b52ca9236803225007cb39025e05697841c5d9405faa90762959156803897df4","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"79b4502d93c23afe2765054a80d03716d4934eb260cdfbe8c401898df3aa5a8f","README.md":"deacdc7fc4a76488c5c1f38f99c4bd1aebfe8ced04c2f4a1f051a9948e9f1f84","src/lib.rs":"72941eea7324a678eaccdce9b6ed9cc451e3e155edae2a9e2fb28b15d897800c","src/repr.rs":"1da63345a5fbfcbcb0332f7c41fa27f4844ca52e26ce8d30aa585a2895f557f0","tests/compile-fail/variants.rs":"32b73a6cfafa330b0fe84cd3bbe12e7d3ba1133aca3fe38bb3e30f54f84ba885","tests/compile-fail/variants.stderr":"f409a02731b9c1c7aa479a462f89c3ffbdb1ac4f48881a0b5771908c4f6eac51","tests/compile-pass/no_imports.rs":"6fa96d43c3970e25dd1e216820dd79f22f5bfe416ce7d6a6df86781797279297","tests/compile-pass/no_std.rs":"e8a402ae12562e6eab4ac531c084278a9dda4ac9b4e64b7a37bb87ce890738e7","tests/ops.rs":"351c54e606e038e39273ef3fb0e6517e83a38fea90e8882415e48500dc195853","tests/serde.rs":"8dd9717ee36b26c4b762a258693030ae27e3990625e513694ff3d68af0a131fd","tests/trybuild.rs":"22e03e02452e47976668c3d4ed54322e4d19a95bd1f72647f5d5792b80e8d0f1"},"package":"6216d2c19a6fb5f29d1ada1dc7bc4367a8cbf0fa4af5cf12e07b5bbdde6b5b2c"}

55
third_party/rust/enumset/Cargo.toml vendored Normal file
View file

@ -0,0 +1,55 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
name = "enumset"
version = "1.0.8"
authors = ["Alissa Rao <lymia@lymiahugs.com>"]
description = "A library for creating compact sets of enums."
documentation = "https://docs.rs/enumset/"
readme = "../README.md"
keywords = ["enum", "bitset"]
categories = ["data-structures"]
license = "MIT/Apache-2.0"
repository = "https://github.com/Lymia/enumset"
[dependencies.enumset_derive]
version = "0.5.5"
[dependencies.serde2]
version = "1.0.91"
optional = true
default-features = false
package = "serde"
[dev-dependencies.bincode]
version = "1.0"
features = ["i128"]
[dev-dependencies.rustversion]
version = "1.0.2"
[dev-dependencies.serde_derive]
version = "1.0.91"
[dev-dependencies.serde_json]
version = "1.0.39"
[dev-dependencies.trybuild]
version = "1.0.24"
[features]
serde = ["serde2", "enumset_derive/serde"]
[badges.maintenance]
status = "actively-developed"
[badges.travis-ci]
branch = "master"
repository = "Lymia/enumset"

201
third_party/rust/enumset/LICENSE-APACHE vendored Normal file
View file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

26
third_party/rust/enumset/LICENSE-MIT vendored Normal file
View file

@ -0,0 +1,26 @@
Copyright (c) 2017-2020 Alissa Rao <lymiahugs@gmail.com>
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

28
third_party/rust/enumset/README.md vendored Normal file
View file

@ -0,0 +1,28 @@
# enumset
[![Build Status](https://api.travis-ci.com/Lymia/enumset.svg?branch=master)](https://travis-ci.com/Lymia/enumset)
[![Latest Version](https://img.shields.io/crates/v/enumset.svg)](https://crates.io/crates/enumset)
![Requires rustc 1.34+](https://img.shields.io/badge/rustc-1.34+-red.svg)
[![Rust Documentation](https://img.shields.io/badge/api-rustdoc-blue.svg)](https://docs.rs/enumset)
A library for defining enums that can be used in compact bit sets.
It supports enums up to 128 variants, and has a macro to use these sets in constants.
See [the documentation](https://docs.rs/enumset) for more information.
# License
This project is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or
http://opensource.org/licenses/MIT)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in enumset by you, as defined in the Apache-2.0 license, shall be
dual licensed as above, without any additional terms or conditions.

734
third_party/rust/enumset/src/lib.rs vendored Normal file
View file

@ -0,0 +1,734 @@
#![no_std]
#![forbid(missing_docs)]
//! A library for defining enums that can be used in compact bit sets. It supports enums up to 128
//! variants, and has a macro to use these sets in constants.
//!
//! For serde support, enable the `serde` feature.
//!
//! # Defining enums for use with EnumSet
//!
//! Enums to be used with [`EnumSet`] should be defined using `#[derive(EnumSetType)]`:
//!
//! ```rust
//! # use enumset::*;
//! #[derive(EnumSetType, Debug)]
//! pub enum Enum {
//! A, B, C, D, E, F, G,
//! }
//! ```
//!
//! For more information on more advanced use cases, see the documentation for [`EnumSetType`].
//!
//! # Working with EnumSets
//!
//! EnumSets can be constructed via [`EnumSet::new()`] like a normal set. In addition,
//! `#[derive(EnumSetType)]` creates operator overloads that allow you to create EnumSets like so:
//!
//! ```rust
//! # use enumset::*;
//! # #[derive(EnumSetType, Debug)] pub enum Enum { A, B, C, D, E, F, G }
//! let new_set = Enum::A | Enum::C | Enum::G;
//! assert_eq!(new_set.len(), 3);
//! ```
//!
//! All bitwise operations you would expect to work on bitsets also work on both EnumSets and
//! enums with `#[derive(EnumSetType)]`:
//! ```rust
//! # use enumset::*;
//! # #[derive(EnumSetType, Debug)] pub enum Enum { A, B, C, D, E, F, G }
//! // Intersection of sets
//! assert_eq!((Enum::A | Enum::B) & Enum::C, EnumSet::empty());
//! assert_eq!((Enum::A | Enum::B) & Enum::A, Enum::A);
//! assert_eq!(Enum::A & Enum::B, EnumSet::empty());
//!
//! // Symmetric difference of sets
//! assert_eq!((Enum::A | Enum::B) ^ (Enum::B | Enum::C), Enum::A | Enum::C);
//! assert_eq!(Enum::A ^ Enum::C, Enum::A | Enum::C);
//!
//! // Difference of sets
//! assert_eq!((Enum::A | Enum::B | Enum::C) - Enum::B, Enum::A | Enum::C);
//!
//! // Complement of sets
//! assert_eq!(!(Enum::E | Enum::G), Enum::A | Enum::B | Enum::C | Enum::D | Enum::F);
//! ```
//!
//! The [`enum_set!`] macro allows you to create EnumSets in constant contexts:
//!
//! ```rust
//! # use enumset::*;
//! # #[derive(EnumSetType, Debug)] pub enum Enum { A, B, C, D, E, F, G }
//! const CONST_SET: EnumSet<Enum> = enum_set!(Enum::A | Enum::B);
//! assert_eq!(CONST_SET, Enum::A | Enum::B);
//! ```
//!
//! Mutable operations on the [`EnumSet`] otherwise work similarly to Rust's builtin sets:
//!
//! ```rust
//! # use enumset::*;
//! # #[derive(EnumSetType, Debug)] pub enum Enum { A, B, C, D, E, F, G }
//! let mut set = EnumSet::new();
//! set.insert(Enum::A);
//! set.insert_all(Enum::E | Enum::G);
//! assert!(set.contains(Enum::A));
//! assert!(!set.contains(Enum::B));
//! assert_eq!(set, Enum::A | Enum::E | Enum::G);
//! ```
pub use enumset_derive::EnumSetType;
use core::cmp::Ordering;
use core::fmt;
use core::fmt::{Debug, Formatter};
use core::hash::{Hash, Hasher};
use core::iter::{FromIterator, Sum};
use core::ops::*;
#[doc(hidden)]
/// Everything in this module is internal API and may change at any time.
pub mod __internal {
    use super::*;

    /// A reexport of core to allow our macros to be generic to std vs core.
    pub use ::core as core_export;

    /// A reexport of serde so there is no requirement to depend on serde.
    #[cfg(feature = "serde")] pub use serde2 as serde;

    /// The actual members of EnumSetType. Put here to avoid polluting global namespaces.
    // NOTE(review): this trait is `unsafe` — `EnumSet` relies on invariants the
    // `#[derive(EnumSetType)]` macro establishes between `Repr`, `ALL_BITS` and
    // the u32 conversions below. It is not meant to be implemented by hand.
    pub unsafe trait EnumSetTypePrivate {
        /// The underlying type used to store the bitset.
        type Repr: EnumSetTypeRepr;
        /// A mask of bits that are valid in the bitset.
        const ALL_BITS: Self::Repr;
        /// Converts an enum of this type into its bit position.
        fn enum_into_u32(self) -> u32;
        /// Converts a bit position into an enum value.
        // NOTE(review): presumably `val` must be a bit position set in
        // `ALL_BITS`; confirm against the derive macro's implementation.
        unsafe fn enum_from_u32(val: u32) -> Self;
        /// Serializes the `EnumSet`.
        ///
        /// This and `deserialize` are part of the `EnumSetType` trait so the procedural derive
        /// can control how `EnumSet` is serialized.
        #[cfg(feature = "serde")]
        fn serialize<S: serde::Serializer>(set: EnumSet<Self>, ser: S) -> Result<S::Ok, S::Error>
            where Self: EnumSetType;
        /// Deserializes the `EnumSet`.
        #[cfg(feature = "serde")]
        fn deserialize<'de, D: serde::Deserializer<'de>>(de: D) -> Result<EnumSet<Self>, D::Error>
            where Self: EnumSetType;
    }
}
use crate::__internal::EnumSetTypePrivate;
#[cfg(feature = "serde")] use crate::__internal::serde;
#[cfg(feature = "serde")] use crate::serde::{Serialize, Deserialize};
mod repr;
use crate::repr::EnumSetTypeRepr;
/// The trait used to define enum types that may be used with [`EnumSet`].
///
/// This trait should be implemented using `#[derive(EnumSetType)]`. Its internal structure is
/// not stable, and may change at any time.
///
/// # Custom Derive
///
/// Any C-like enum is supported, as long as there are no more than 128 variants in the enum,
/// and no variant discriminator is larger than 127.
///
/// The custom derive for [`EnumSetType`] automatically creates implementations of [`PartialEq`],
/// [`Sub`], [`BitAnd`], [`BitOr`], [`BitXor`], and [`Not`] allowing the enum to be used as
/// if it were an [`EnumSet`] in expressions. This can be disabled by adding an `#[enumset(no_ops)]`
/// annotation to the enum.
///
/// The custom derive for `EnumSetType` automatically implements [`Copy`], [`Clone`], [`Eq`], and
/// [`PartialEq`] on the enum. These are required for the [`EnumSet`] to function.
///
/// In addition, if you have renamed the `enumset` crate in your crate, you can use the
/// `#[enumset(crate_name = "enumset2")]` attribute to tell the custom derive to use that name
/// instead.
///
/// Attributes controlling the serialization of an `EnumSet` are documented in
/// [its documentation](./struct.EnumSet.html#serialization).
///
/// # Examples
///
/// Deriving a plain EnumSetType:
///
/// ```rust
/// # use enumset::*;
/// #[derive(EnumSetType)]
/// pub enum Enum {
///    A, B, C, D, E, F, G,
/// }
/// ```
///
/// Deriving a sparse EnumSetType:
///
/// ```rust
/// # use enumset::*;
/// #[derive(EnumSetType)]
/// pub enum SparseEnum {
///    A = 10, B = 20, C = 30, D = 127,
/// }
/// ```
///
/// Deriving an EnumSetType without adding ops:
///
/// ```rust
/// # use enumset::*;
/// #[derive(EnumSetType)]
/// #[enumset(no_ops)]
/// pub enum NoOpsEnum {
///    A, B, C, D, E, F, G,
/// }
/// ```
// NOTE(review): `unsafe` because implementors must uphold the invariants of the
// `EnumSetTypePrivate` supertrait; only the derive macro should implement this.
pub unsafe trait EnumSetType: Copy + Eq + EnumSetTypePrivate { }
/// An efficient set type for enums.
///
/// It is implemented using a bitset stored using the smallest integer that can fit all bits
/// in the underlying enum. In general, an enum variant with a numeric value of `n` is stored in
/// the nth least significant bit (corresponding with a mask of, e.g. `1 << enum as u32`).
///
/// # Serialization
///
/// When the `serde` feature is enabled, `EnumSet`s can be serialized and deserialized using
/// the `serde` crate. The exact serialization format can be controlled with additional attributes
/// on the enum type. These attributes are valid regardless of whether the `serde` feature
/// is enabled.
///
/// By default, `EnumSet`s serialize by directly writing out the underlying bitset as an integer
/// of the smallest type that can fit in the underlying enum. You can add a
/// `#[enumset(serialize_repr = "u8")]` attribute to your enum to control the integer type used
/// for serialization. This can be important for avoiding unintentional breaking changes when
/// `EnumSet`s are serialized with formats like `bincode`.
///
/// By default, unknown bits are ignored and silently removed from the bitset. To override this
/// behavior, you can add a `#[enumset(serialize_deny_unknown)]` attribute. This will cause
/// deserialization to fail if an invalid bit is set.
///
/// In addition, the `#[enumset(serialize_as_list)]` attribute causes the `EnumSet` to be
/// instead serialized as a list of enum variants. This requires your enum type implement
/// [`Serialize`] and [`Deserialize`]. Note that this is a breaking change.
#[derive(Copy, Clone, PartialEq, Eq)]
// `repr(transparent)` guarantees `EnumSet<T>` has exactly the layout of its
// single field, `T::Repr`.
#[repr(transparent)]
pub struct EnumSet<T: EnumSetType> {
    #[doc(hidden)]
    /// This is public due to the [`enum_set!`] macro.
    /// This is **NOT** public API and may change at any time.
    pub __priv_repr: T::Repr
}
impl <T: EnumSetType> EnumSet<T> {
// Returns all bits valid for the enum
#[inline(always)]
fn all_bits() -> T::Repr {
T::ALL_BITS
}
/// Creates an empty `EnumSet`.
#[inline(always)]
pub fn new() -> Self {
EnumSet { __priv_repr: T::Repr::empty() }
}
/// Returns an `EnumSet` containing a single element.
#[inline(always)]
pub fn only(t: T) -> Self {
let mut set = Self::new();
set.insert(t);
set
}
/// Creates an empty `EnumSet`.
///
/// This is an alias for [`EnumSet::new`].
#[inline(always)]
pub fn empty() -> Self {
Self::new()
}
/// Returns an `EnumSet` containing all valid variants of the enum.
#[inline(always)]
pub fn all() -> Self {
EnumSet { __priv_repr: Self::all_bits() }
}
/// Total number of bits used by this type. Note that the actual amount of space used is
/// rounded up to the next highest integer type (`u8`, `u16`, `u32`, `u64`, or `u128`).
///
/// This is the same as [`EnumSet::variant_count`] except in enums with "sparse" variants.
/// (e.g. `enum Foo { A = 10, B = 20 }`)
#[inline(always)]
pub fn bit_width() -> u32 {
T::Repr::WIDTH - T::ALL_BITS.leading_zeros()
}
/// The number of valid variants that this type can contain.
///
/// This is the same as [`EnumSet::bit_width`] except in enums with "sparse" variants.
/// (e.g. `enum Foo { A = 10, B = 20 }`)
#[inline(always)]
pub fn variant_count() -> u32 {
T::ALL_BITS.count_ones()
}
/// Returns the number of elements in this set.
#[inline(always)]
pub fn len(&self) -> usize {
self.__priv_repr.count_ones() as usize
}
/// Returns `true` if the set contains no elements.
#[inline(always)]
pub fn is_empty(&self) -> bool {
self.__priv_repr.is_empty()
}
/// Removes all elements from the set.
#[inline(always)]
pub fn clear(&mut self) {
self.__priv_repr = T::Repr::empty()
}
/// Returns `true` if `self` has no elements in common with `other`. This is equivalent to
/// checking for an empty intersection.
#[inline(always)]
pub fn is_disjoint(&self, other: Self) -> bool {
(*self & other).is_empty()
}
/// Returns `true` if the set is a superset of another, i.e., `self` contains at least all the
/// values in `other`.
#[inline(always)]
pub fn is_superset(&self, other: Self) -> bool {
(*self & other).__priv_repr == other.__priv_repr
}
/// Returns `true` if the set is a subset of another, i.e., `other` contains at least all
/// the values in `self`.
#[inline(always)]
pub fn is_subset(&self, other: Self) -> bool {
other.is_superset(*self)
}
    /// Returns a set containing any elements present in either set.
    #[inline(always)]
    pub fn union(&self, other: Self) -> Self {
        // Set union maps directly onto bitwise OR of the underlying representations.
        EnumSet { __priv_repr: self.__priv_repr | other.__priv_repr }
    }
    /// Returns a set containing every element present in both sets.
    #[inline(always)]
    pub fn intersection(&self, other: Self) -> Self {
        // Set intersection maps directly onto bitwise AND.
        EnumSet { __priv_repr: self.__priv_repr & other.__priv_repr }
    }
    /// Returns a set containing element present in `self` but not in `other`.
    #[inline(always)]
    pub fn difference(&self, other: Self) -> Self {
        // `and_not` is `self & !other` on the raw representation.
        EnumSet { __priv_repr: self.__priv_repr.and_not(other.__priv_repr) }
    }
    /// Returns a set containing every element present in either `self` or `other`, but is not
    /// present in both.
    #[inline(always)]
    pub fn symmetrical_difference(&self, other: Self) -> Self {
        EnumSet { __priv_repr: self.__priv_repr ^ other.__priv_repr }
    }
    /// Returns a set containing all enum variants not in this set.
    #[inline(always)]
    pub fn complement(&self) -> Self {
        // Mask with all_bits() so bit positions that don't correspond to a variant stay clear.
        EnumSet { __priv_repr: !self.__priv_repr & Self::all_bits() }
    }
    /// Checks whether this set contains a value.
    #[inline(always)]
    pub fn contains(&self, value: T) -> bool {
        self.__priv_repr.has_bit(value.enum_into_u32())
    }
    /// Adds a value to this set.
    ///
    /// If the set did not have this value present, `true` is returned.
    ///
    /// If the set did have this value present, `false` is returned.
    #[inline(always)]
    pub fn insert(&mut self, value: T) -> bool {
        // Return value matches `HashSet::insert`: `true` means the value was newly added,
        // so capture the "was absent" flag before mutating.
        let contains = !self.contains(value);
        self.__priv_repr.add_bit(value.enum_into_u32());
        contains
    }
    /// Removes a value from this set. Returns whether the value was present in the set.
    #[inline(always)]
    pub fn remove(&mut self, value: T) -> bool {
        // As with `insert`, check membership before mutating to compute the return value.
        let contains = self.contains(value);
        self.__priv_repr.remove_bit(value.enum_into_u32());
        contains
    }
    /// Adds all elements in another set to this one.
    #[inline(always)]
    pub fn insert_all(&mut self, other: Self) {
        // In-place union.
        self.__priv_repr = self.__priv_repr | other.__priv_repr
    }
    /// Removes all values in another set from this one.
    #[inline(always)]
    pub fn remove_all(&mut self, other: Self) {
        // In-place difference.
        self.__priv_repr = self.__priv_repr.and_not(other.__priv_repr);
    }
    /// Creates an iterator over the values in this set.
    ///
    /// Note that iterator invalidation is impossible as the iterator contains a copy of this type,
    /// rather than holding a reference to it.
    pub fn iter(&self) -> EnumSetIter<T> {
        EnumSetIter::new(*self)
    }
}
/// Helper macro for generating conversion functions.
///
/// For each `for_num!` entry it emits six methods on `EnumSet<T>`: panicking,
/// `Option`-returning and truncating conversions both to and from the given
/// integer type. The `$from_fn`/`$to_fn`(`_opt`) idents name the corresponding
/// `EnumSetTypeRepr` primitives; the remaining idents name the public methods.
macro_rules! conversion_impls {
    (
        $(for_num!(
            $underlying:ty, $underlying_str:expr,
            $from_fn:ident $to_fn:ident $from_fn_opt:ident $to_fn_opt:ident,
            $from:ident $try_from:ident $from_truncated:ident
            $to:ident $try_to:ident $to_truncated:ident
        );)*
    ) => {
        impl <T : EnumSetType> EnumSet<T> {$(
            #[doc = "Returns a `"]
            #[doc = $underlying_str]
            #[doc = "` representing the elements of this set.\n\nIf the underlying bitset will \
                     not fit in a `"]
            #[doc = $underlying_str]
            #[doc = "`, this method will panic."]
            #[inline(always)]
            pub fn $to(&self) -> $underlying {
                self.$try_to().expect("Bitset will not fit into this type.")
            }
            #[doc = "Tries to return a `"]
            #[doc = $underlying_str]
            #[doc = "` representing the elements of this set.\n\nIf the underlying bitset will \
                     not fit in a `"]
            #[doc = $underlying_str]
            #[doc = "`, this method will instead return `None`."]
            #[inline(always)]
            pub fn $try_to(&self) -> Option<$underlying> {
                EnumSetTypeRepr::$to_fn_opt(&self.__priv_repr)
            }
            #[doc = "Returns a truncated `"]
            #[doc = $underlying_str]
            #[doc = "` representing the elements of this set.\n\nIf the underlying bitset will \
                     not fit in a `"]
            #[doc = $underlying_str]
            #[doc = "`, this method will truncate any bits that don't fit."]
            #[inline(always)]
            pub fn $to_truncated(&self) -> $underlying {
                EnumSetTypeRepr::$to_fn(&self.__priv_repr)
            }
            #[doc = "Constructs a bitset from a `"]
            #[doc = $underlying_str]
            #[doc = "`.\n\nIf a bit that doesn't correspond to an enum variant is set, this \
                     method will panic."]
            #[inline(always)]
            pub fn $from(bits: $underlying) -> Self {
                Self::$try_from(bits).expect("Bitset contains invalid variants.")
            }
            #[doc = "Attempts to constructs a bitset from a `"]
            #[doc = $underlying_str]
            #[doc = "`.\n\nIf a bit that doesn't correspond to an enum variant is set, this \
                     method will return `None`."]
            #[inline(always)]
            pub fn $try_from(bits: $underlying) -> Option<Self> {
                // Fails if the value doesn't fit the repr, or if any bit outside
                // the valid-variant mask is set.
                let bits = T::Repr::$from_fn_opt(bits);
                let mask = Self::all().__priv_repr;
                bits.and_then(|bits| if bits.and_not(mask).is_empty() {
                    Some(EnumSet { __priv_repr: bits })
                } else {
                    None
                })
            }
            #[doc = "Constructs a bitset from a `"]
            #[doc = $underlying_str]
            #[doc = "`, ignoring invalid variants."]
            #[inline(always)]
            pub fn $from_truncated(bits: $underlying) -> Self {
                // Mask out invalid bits in the source width before converting, so
                // bits that don't fit the repr are dropped rather than rejected.
                let mask = Self::all().$to_truncated();
                let bits = <T::Repr as EnumSetTypeRepr>::$from_fn(bits & mask);
                EnumSet { __priv_repr: bits }
            }
        )*}
    }
}
// Instantiate the integer conversion API for every supported unsigned width.
conversion_impls! {
    for_num!(u8, "u8", from_u8 to_u8 from_u8_opt to_u8_opt,
             from_u8 try_from_u8 from_u8_truncated as_u8 try_as_u8 as_u8_truncated);
    for_num!(u16, "u16", from_u16 to_u16 from_u16_opt to_u16_opt,
             from_u16 try_from_u16 from_u16_truncated as_u16 try_as_u16 as_u16_truncated);
    for_num!(u32, "u32", from_u32 to_u32 from_u32_opt to_u32_opt,
             from_u32 try_from_u32 from_u32_truncated as_u32 try_as_u32 as_u32_truncated);
    for_num!(u64, "u64", from_u64 to_u64 from_u64_opt to_u64_opt,
             from_u64 try_from_u64 from_u64_truncated as_u64 try_as_u64 as_u64_truncated);
    for_num!(u128, "u128", from_u128 to_u128 from_u128_opt to_u128_opt,
             from_u128 try_from_u128 from_u128_truncated as_u128 try_as_u128 as_u128_truncated);
    for_num!(usize, "usize", from_usize to_usize from_usize_opt to_usize_opt,
             from_usize try_from_usize from_usize_truncated
             as_usize try_as_usize as_usize_truncated);
}
impl <T: EnumSetType> Default for EnumSet<T> {
    /// Returns an empty set.
    fn default() -> Self {
        Self::new()
    }
}
// `EnumSet` is `Copy`, so `into_iter` simply copies the set into the iterator;
// the original set remains usable.
impl <T: EnumSetType> IntoIterator for EnumSet<T> {
    type Item = T;
    type IntoIter = EnumSetIter<T>;
    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}
// Summing sets (or individual values) folds them together with union, by value
// or by reference.
impl <T: EnumSetType> Sum for EnumSet<T> {
    fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
        iter.fold(EnumSet::empty(), |a, v| a | v)
    }
}
impl <'a, T: EnumSetType> Sum<&'a EnumSet<T>> for EnumSet<T> {
    fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
        iter.fold(EnumSet::empty(), |a, v| a | *v)
    }
}
impl <T: EnumSetType> Sum<T> for EnumSet<T> {
    fn sum<I: Iterator<Item=T>>(iter: I) -> Self {
        iter.fold(EnumSet::empty(), |a, v| a | v)
    }
}
impl <'a, T: EnumSetType> Sum<&'a T> for EnumSet<T> {
    fn sum<I: Iterator<Item=&'a T>>(iter: I) -> Self {
        iter.fold(EnumSet::empty(), |a, v| a | *v)
    }
}
// Binary set operators. The right-hand side is anything convertible into an
// `EnumSet<T>` — another set or a single variant — so `set - T::A` works.
impl <T: EnumSetType, O: Into<EnumSet<T>>> Sub<O> for EnumSet<T> {
    type Output = Self;
    #[inline(always)]
    fn sub(self, other: O) -> Self::Output {
        // `-` is set difference.
        self.difference(other.into())
    }
}
impl <T: EnumSetType, O: Into<EnumSet<T>>> BitAnd<O> for EnumSet<T> {
    type Output = Self;
    #[inline(always)]
    fn bitand(self, other: O) -> Self::Output {
        self.intersection(other.into())
    }
}
impl <T: EnumSetType, O: Into<EnumSet<T>>> BitOr<O> for EnumSet<T> {
    type Output = Self;
    #[inline(always)]
    fn bitor(self, other: O) -> Self::Output {
        self.union(other.into())
    }
}
impl <T: EnumSetType, O: Into<EnumSet<T>>> BitXor<O> for EnumSet<T> {
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, other: O) -> Self::Output {
        self.symmetrical_difference(other.into())
    }
}
// Compound-assignment operators, each defined in terms of the corresponding
// binary operator (cheap, since `EnumSet` is `Copy`).
impl <T: EnumSetType, O: Into<EnumSet<T>>> SubAssign<O> for EnumSet<T> {
    #[inline(always)]
    fn sub_assign(&mut self, rhs: O) {
        *self = *self - rhs;
    }
}
impl <T: EnumSetType, O: Into<EnumSet<T>>> BitAndAssign<O> for EnumSet<T> {
    #[inline(always)]
    fn bitand_assign(&mut self, rhs: O) {
        *self = *self & rhs;
    }
}
impl <T: EnumSetType, O: Into<EnumSet<T>>> BitOrAssign<O> for EnumSet<T> {
    #[inline(always)]
    fn bitor_assign(&mut self, rhs: O) {
        *self = *self | rhs;
    }
}
impl <T: EnumSetType, O: Into<EnumSet<T>>> BitXorAssign<O> for EnumSet<T> {
    #[inline(always)]
    fn bitxor_assign(&mut self, rhs: O) {
        *self = *self ^ rhs;
    }
}
impl <T: EnumSetType> Not for EnumSet<T> {
    type Output = Self;
    #[inline(always)]
    fn not(self) -> Self::Output {
        // `!set` is the complement restricted to valid variants.
        self.complement()
    }
}
impl <T: EnumSetType> From<T> for EnumSet<T> {
    // A single variant converts into the one-element set containing it.
    fn from(t: T) -> Self {
        EnumSet::only(t)
    }
}
impl <T: EnumSetType> PartialEq<T> for EnumSet<T> {
    // A set equals a bare variant iff it contains exactly that one element.
    fn eq(&self, other: &T) -> bool {
        self.__priv_repr == EnumSet::only(*other).__priv_repr
    }
}
impl <T: EnumSetType + Debug> Debug for EnumSet<T> {
    /// Formats the set as `EnumSet(A | B | C)`, delegating each element to its
    /// own `Debug` impl. An empty set renders as `EnumSet()`.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.write_str("EnumSet(")?;
        for (idx, variant) in self.iter().enumerate() {
            // Separator goes before every element except the first.
            if idx != 0 {
                f.write_str(" | ")?;
            }
            variant.fmt(f)?;
        }
        f.write_str(")")?;
        Ok(())
    }
}
// Hashing and ordering operate directly on the raw bit representation; the
// resulting order is by bit pattern, not anything semantically meaningful.
#[allow(clippy::derive_hash_xor_eq)] // This impl exists to change trait bounds only.
impl <T: EnumSetType> Hash for EnumSet<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.__priv_repr.hash(state)
    }
}
impl <T: EnumSetType> PartialOrd for EnumSet<T> {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        // Consistent with `Ord` below — both compare the underlying repr.
        self.__priv_repr.partial_cmp(&other.__priv_repr)
    }
}
impl <T: EnumSetType> Ord for EnumSet<T> {
    fn cmp(&self, other: &Self) -> Ordering {
        self.__priv_repr.cmp(&other.__priv_repr)
    }
}
// Serde support delegates to serialize/deserialize methods on the
// `EnumSetType` trait, so the derive's `#[enumset(...)]` attributes control
// the on-the-wire format.
#[cfg(feature = "serde")]
impl <T: EnumSetType> Serialize for EnumSet<T> {
    fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        T::serialize(*self, serializer)
    }
}
#[cfg(feature = "serde")]
impl <'de, T: EnumSetType> Deserialize<'de> for EnumSet<T> {
    fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        T::deserialize(deserializer)
    }
}
/// The iterator used by [`EnumSet`]s.
#[derive(Clone, Debug)]
pub struct EnumSetIter<T: EnumSetType> {
    // A private copy of the set being iterated; bits are cleared as they are
    // yielded, so this doubles as the "remaining elements" state.
    set: EnumSet<T>,
}
impl <T: EnumSetType> EnumSetIter<T> {
    // Internal constructor used by `EnumSet::iter`.
    fn new(set: EnumSet<T>) -> EnumSetIter<T> {
        EnumSetIter { set }
    }
}
impl <T: EnumSetType> Iterator for EnumSetIter<T> {
    type Item = T;
    /// Yields the remaining variants in ascending bit order, consuming the
    /// iterator's private copy of the set one bit at a time.
    fn next(&mut self) -> Option<Self::Item> {
        if self.set.is_empty() {
            return None;
        }
        // Pop the lowest set bit and translate it back into its variant.
        let bit = self.set.__priv_repr.trailing_zeros();
        self.set.__priv_repr.remove_bit(bit);
        // SAFETY: bits in the set originate from valid variants of `T`, so
        // `bit` is a discriminant `enum_from_u32` accepts.
        unsafe { Some(T::enum_from_u32(bit)) }
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        // The remaining count is exactly the popcount of the leftover bits.
        let remaining = self.set.len();
        (remaining, Some(remaining))
    }
}
// size_hint is exact, so ExactSizeIterator is a free marker impl.
impl<T: EnumSetType> ExactSizeIterator for EnumSetIter<T> {}
// Extending from individual variants inserts each one; collecting from an
// iterator of variants builds a fresh set.
impl<T: EnumSetType> Extend<T> for EnumSet<T> {
    fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
        iter.into_iter().for_each(|v| { self.insert(v); });
    }
}
impl<T: EnumSetType> FromIterator<T> for EnumSet<T> {
    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
        let mut set = EnumSet::default();
        set.extend(iter);
        set
    }
}
// The same two operations for iterators of whole sets, unioning them in.
impl<T: EnumSetType> Extend<EnumSet<T>> for EnumSet<T> {
    fn extend<I: IntoIterator<Item = EnumSet<T>>>(&mut self, iter: I) {
        iter.into_iter().for_each(|v| { self.insert_all(v); });
    }
}
impl<T: EnumSetType> FromIterator<EnumSet<T>> for EnumSet<T> {
    fn from_iter<I: IntoIterator<Item = EnumSet<T>>>(iter: I) -> Self {
        let mut set = EnumSet::default();
        set.extend(iter);
        set
    }
}
/// Creates a EnumSet literal, which can be used in const contexts.
///
/// The syntax used is `enum_set!(Type::A | Type::B | Type::C)`. Each variant must be of the same
/// type, or a error will occur at compile-time.
///
/// This macro accepts trailing `|`s to allow easier use in other macros.
///
/// # Examples
///
/// ```rust
/// # use enumset::*;
/// # #[derive(EnumSetType, Debug)] enum Enum { A, B, C }
/// const CONST_SET: EnumSet<Enum> = enum_set!(Enum::A | Enum::B);
/// assert_eq!(CONST_SET, Enum::A | Enum::B);
/// ```
///
/// This macro is strongly typed. For example, the following will not compile:
///
/// ```compile_fail
/// # use enumset::*;
/// # #[derive(EnumSetType, Debug)] enum Enum { A, B, C }
/// # #[derive(EnumSetType, Debug)] enum Enum2 { A, B, C }
/// let type_error = enum_set!(Enum::A | Enum2::B);
/// ```
#[macro_export]
macro_rules! enum_set {
    // No variants: the empty set.
    ($(|)*) => {
        $crate::EnumSet { __priv_repr: 0 }
    };
    // A single variant. The hidden `#[deprecated]` helper methods are const fns
    // generated by the derive; the `allow` silences the deliberate deprecation
    // used to discourage direct calls.
    ($value:path $(|)*) => {
        {
            #[allow(deprecated)] let value = $value.__impl_enumset_internal__const_only();
            value
        }
    };
    // Two or more variants: start from the first and merge each remaining one in.
    ($value:path | $($rest:path)|* $(|)*) => {
        {
            #[allow(deprecated)] let value = $value.__impl_enumset_internal__const_only();
            $(#[allow(deprecated)] let value = $rest.__impl_enumset_internal__const_merge(value);)*
            value
        }
    };
}

163
third_party/rust/enumset/src/repr.rs vendored Normal file
View file

@ -0,0 +1,163 @@
use core::convert::TryInto;
use core::fmt::{Debug};
use core::hash::{Hash};
use core::ops::*;
/// A trait marking valid underlying bitset storage types and providing the
/// operations `EnumSet` and related types use.
pub trait EnumSetTypeRepr :
    // Basic traits used to derive traits
    Copy +
    Ord +
    Eq +
    Debug +
    Hash +
    // Operations used by enumset
    BitAnd<Output = Self> +
    BitOr<Output = Self> +
    BitXor<Output = Self> +
    Not<Output = Self> +
{
    /// The number of bits this type can store.
    const WIDTH: u32;
    // Whole-set and single-bit queries/mutations.
    fn is_empty(&self) -> bool;
    fn empty() -> Self;
    fn add_bit(&mut self, bit: u32);
    fn remove_bit(&mut self, bit: u32);
    fn has_bit(&self, bit: u32) -> bool;
    fn count_ones(&self) -> u32;
    // Counts the set bits at positions `cursor` and above.
    fn count_remaining_ones(&self, cursor: u32) -> usize;
    fn leading_zeros(&self) -> u32;
    fn trailing_zeros(&self) -> u32;
    // `self & !other` — the set-difference primitive.
    fn and_not(&self, other: Self) -> Self;
    // Conversions to/from every supported integer width. The plain variants
    // may truncate (they are implemented with `as` casts below); the `_opt`
    // variants return `None` when the value does not fit the target type.
    fn from_u8(v: u8) -> Self;
    fn from_u16(v: u16) -> Self;
    fn from_u32(v: u32) -> Self;
    fn from_u64(v: u64) -> Self;
    fn from_u128(v: u128) -> Self;
    fn from_usize(v: usize) -> Self;
    fn to_u8(&self) -> u8;
    fn to_u16(&self) -> u16;
    fn to_u32(&self) -> u32;
    fn to_u64(&self) -> u64;
    fn to_u128(&self) -> u128;
    fn to_usize(&self) -> usize;
    fn from_u8_opt(v: u8) -> Option<Self>;
    fn from_u16_opt(v: u16) -> Option<Self>;
    fn from_u32_opt(v: u32) -> Option<Self>;
    fn from_u64_opt(v: u64) -> Option<Self>;
    fn from_u128_opt(v: u128) -> Option<Self>;
    fn from_usize_opt(v: usize) -> Option<Self>;
    fn to_u8_opt(&self) -> Option<u8>;
    fn to_u16_opt(&self) -> Option<u16>;
    fn to_u32_opt(&self) -> Option<u32>;
    fn to_u64_opt(&self) -> Option<u64>;
    fn to_u128_opt(&self) -> Option<u128>;
    fn to_usize_opt(&self) -> Option<usize>;
}
/// Implements `EnumSetTypeRepr` for a primitive unsigned integer of the given
/// bit width.
macro_rules! prim {
    ($name:ty, $width:expr) => {
        impl EnumSetTypeRepr for $name {
            const WIDTH: u32 = $width;
            #[inline(always)]
            fn is_empty(&self) -> bool { *self == 0 }
            #[inline(always)]
            fn empty() -> Self { 0 }
            #[inline(always)]
            fn add_bit(&mut self, bit: u32) {
                *self |= 1 << bit as $name;
            }
            #[inline(always)]
            fn remove_bit(&mut self, bit: u32) {
                *self &= !(1 << bit as $name);
            }
            #[inline(always)]
            fn has_bit(&self, bit: u32) -> bool {
                (self & (1 << bit as $name)) != 0
            }
            #[inline(always)]
            fn count_ones(&self) -> u32 { (*self).count_ones() }
            #[inline(always)]
            fn leading_zeros(&self) -> u32 { (*self).leading_zeros() }
            #[inline(always)]
            fn trailing_zeros(&self) -> u32 { (*self).trailing_zeros() }
            #[inline(always)]
            fn and_not(&self, other: Self) -> Self { (*self) & !other }
            #[inline(always)]
            fn count_remaining_ones(&self, cursor: u32) -> usize {
                // `left_mask` selects bits at positions >= cursor. `checked_shl`
                // covers cursor >= WIDTH: the shift returns None -> 0, the
                // wrapping_sub yields all-ones, and the negation makes the mask
                // empty, so the count is 0 instead of overflowing.
                let left_mask =
                    !((1 as $name).checked_shl(cursor).unwrap_or(0).wrapping_sub(1));
                (*self & left_mask).count_ones() as usize
            }
            // Plain conversions truncate via `as`; the `_opt` variants below use
            // `TryInto` and return `None` on overflow instead.
            #[inline(always)]
            fn from_u8(v: u8) -> Self { v as $name }
            #[inline(always)]
            fn from_u16(v: u16) -> Self { v as $name }
            #[inline(always)]
            fn from_u32(v: u32) -> Self { v as $name }
            #[inline(always)]
            fn from_u64(v: u64) -> Self { v as $name }
            #[inline(always)]
            fn from_u128(v: u128) -> Self { v as $name }
            #[inline(always)]
            fn from_usize(v: usize) -> Self { v as $name }
            #[inline(always)]
            fn to_u8(&self) -> u8 { (*self) as u8 }
            #[inline(always)]
            fn to_u16(&self) -> u16 { (*self) as u16 }
            #[inline(always)]
            fn to_u32(&self) -> u32 { (*self) as u32 }
            #[inline(always)]
            fn to_u64(&self) -> u64 { (*self) as u64 }
            #[inline(always)]
            fn to_u128(&self) -> u128 { (*self) as u128 }
            #[inline(always)]
            fn to_usize(&self) -> usize { (*self) as usize }
            #[inline(always)]
            fn from_u8_opt(v: u8) -> Option<Self> { v.try_into().ok() }
            #[inline(always)]
            fn from_u16_opt(v: u16) -> Option<Self> { v.try_into().ok() }
            #[inline(always)]
            fn from_u32_opt(v: u32) -> Option<Self> { v.try_into().ok() }
            #[inline(always)]
            fn from_u64_opt(v: u64) -> Option<Self> { v.try_into().ok() }
            #[inline(always)]
            fn from_u128_opt(v: u128) -> Option<Self> { v.try_into().ok() }
            #[inline(always)]
            fn from_usize_opt(v: usize) -> Option<Self> { v.try_into().ok() }
            #[inline(always)]
            fn to_u8_opt(&self) -> Option<u8> { (*self).try_into().ok() }
            #[inline(always)]
            fn to_u16_opt(&self) -> Option<u16> { (*self).try_into().ok() }
            #[inline(always)]
            fn to_u32_opt(&self) -> Option<u32> { (*self).try_into().ok() }
            #[inline(always)]
            fn to_u64_opt(&self) -> Option<u64> { (*self).try_into().ok() }
            #[inline(always)]
            fn to_u128_opt(&self) -> Option<u128> { (*self).try_into().ok() }
            #[inline(always)]
            fn to_usize_opt(&self) -> Option<usize> { (*self).try_into().ok() }
        }
    }
}
// All unsigned widths the derive macro may pick as a set representation.
prim!(u8  , 8  );
prim!(u16 , 16 );
prim!(u32 , 32 );
prim!(u64 , 64 );
prim!(u128, 128);

View file

@ -0,0 +1,52 @@
use enumset::*;
#[derive(EnumSetType)]
enum VariantOver127 {
Variant = 128,
}
#[derive(EnumSetType)]
#[repr(i64)]
enum VariantOverU32 {
Variant = 0x100000000,
}
#[derive(EnumSetType)]
enum TooManyVariants {
_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20,
_21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39,
_40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58,
_59, _60, _61, _62, _63, _64, _65, _66, _67, _68, _69, _70, _71, _72, _73, _74, _75, _76, _77,
_78, _79, _80, _81, _82, _83, _84, _85, _86, _87, _88, _89, _90, _91, _92, _93, _94, _95, _96,
_97, _98, _99, _100, _101, _102, _103, _104, _105, _106, _107, _108, _109, _110, _111, _112,
_113, _114, _115, _116, _117, _118, _119, _120, _121, _122, _123, _124, _125, _126, _127, _128,
}
#[derive(EnumSetType)]
enum NegativeVariant {
Variant = -1,
}
#[derive(EnumSetType)]
#[repr(usize)]
enum BadRepr {
Variant,
}
#[derive(EnumSetType)]
enum HasFields {
Variant(u32),
}
#[derive(EnumSetType)]
#[enumset(serialize_repr = "u8")]
enum BadSerializationRepr {
Variant = 8,
}
#[derive(EnumSetType)]
struct BadItemType {
}
fn main() { }

View file

@ -0,0 +1,45 @@
error: `#[derive(EnumSetType)]` currently only supports discriminants up to 127.
--> $DIR/variants.rs:5:5
|
5 | Variant = 128,
| ^^^^^^^^^^^^^
error: Enum set discriminants must be `u32`s. (larger discrimiants are still unsupported with reprs that allow them.)
--> $DIR/variants.rs:11:15
|
11 | Variant = 0x100000000,
| ^^^^^^^^^^^
error: `#[derive(EnumSetType)]` currently only supports enums up to 128 variants.
--> $DIR/variants.rs:22:95
|
22 | _113, _114, _115, _116, _117, _118, _119, _120, _121, _122, _123, _124, _125, _126, _127, _128,
| ^^^^
error: Enum set discriminants must be `u32`s.
--> $DIR/variants.rs:27:5
|
27 | Variant = -1,
| ^^^^^^^^^^^^
error: `#[derive(EnumSetType)]` can only be used on fieldless enums.
--> $DIR/variants.rs:38:5
|
38 | Variant(u32),
| ^^^^^^^^^^^^
error: serialize_repr cannot be smaller than bitset.
--> $DIR/variants.rs:41:10
|
41 | #[derive(EnumSetType)]
| ^^^^^^^^^^^
|
= note: this error originates in the derive macro `EnumSetType` (in Nightly builds, run with -Z macro-backtrace for more info)
error: `#[derive(EnumSetType)]` may only be used on enums
--> $DIR/variants.rs:48:1
|
48 | / struct BadItemType {
49 | |
50 | | }
| |_^

View file

@ -0,0 +1,50 @@
#![no_std]
#![allow(dead_code)]
// Build-pass test: `#[derive(EnumSetType)]` must compile in a `no_std` crate,
// even when `std` is only reachable under a renamed path.
extern crate std as __renamed_std; // so we don't have compile issues, but ::std still errors.
use enumset::EnumSetType;
#[derive(EnumSetType)]
pub enum EmptyEnum { }
#[derive(EnumSetType)]
pub enum Enum1 {
    A,
}
#[derive(EnumSetType)]
pub enum SmallEnum {
    A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z,
}
// 128 variants: the largest supported bitset width.
#[derive(EnumSetType)]
pub enum Enum128 {
    A, B, C, D, E, F, G, H, _8, _9, _10, _11, _12, _13, _14, _15,
    _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31,
    _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47,
    _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63,
    _64, _65, _66, _67, _68, _69, _70, _71, _72, _73, _74, _75, _76, _77, _78, _79,
    _80, _81, _82, _83, _84, _85, _86, _87, _88, _89, _90, _91, _92, _93, _94, _95,
    _96, _97, _98, _99, _100, _101, _102, _103, _104, _105, _106, _107, _108, _109,
    _110, _111, _112, _113, _114, _115, _116, _117, _118, _119, _120, _121, _122,
    _123, _124, _125, _126, _127,
}
// Non-contiguous discriminants.
#[derive(EnumSetType)]
pub enum SparseEnum {
    A = 0xA, B = 20, C = 30, D = 40, E = 50, F = 60, G = 70, H = 80,
}
// Explicit reprs must also be accepted by the derive.
#[repr(u32)]
#[derive(EnumSetType)]
pub enum ReprEnum {
    A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z,
}
#[repr(C)]
#[derive(EnumSetType)]
pub enum ReprEnum4 {
    A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z,
}
pub fn main() {
}

View file

@ -0,0 +1,50 @@
#![no_std]
#![allow(dead_code)]
// Build-pass test: same shape as the sibling no_std test, but with a glob
// import of the crate (`use enumset::*;`) to check name-resolution hygiene.
extern crate std as __renamed_std; // so we don't have compile issues, but ::std still errors.
use enumset::*;
#[derive(EnumSetType)]
pub enum EmptyEnum { }
#[derive(EnumSetType)]
pub enum Enum1 {
    A,
}
#[derive(EnumSetType)]
pub enum SmallEnum {
    A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z,
}
// 128 variants: the largest supported bitset width.
#[derive(EnumSetType)]
pub enum Enum128 {
    A, B, C, D, E, F, G, H, _8, _9, _10, _11, _12, _13, _14, _15,
    _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31,
    _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47,
    _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63,
    _64, _65, _66, _67, _68, _69, _70, _71, _72, _73, _74, _75, _76, _77, _78, _79,
    _80, _81, _82, _83, _84, _85, _86, _87, _88, _89, _90, _91, _92, _93, _94, _95,
    _96, _97, _98, _99, _100, _101, _102, _103, _104, _105, _106, _107, _108, _109,
    _110, _111, _112, _113, _114, _115, _116, _117, _118, _119, _120, _121, _122,
    _123, _124, _125, _126, _127,
}
// Non-contiguous discriminants.
#[derive(EnumSetType)]
pub enum SparseEnum {
    A = 0xA, B = 20, C = 30, D = 40, E = 50, F = 60, G = 70, H = 80,
}
// Explicit reprs must also be accepted by the derive.
#[repr(u32)]
#[derive(EnumSetType)]
pub enum ReprEnum {
    A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z,
}
#[repr(C)]
#[derive(EnumSetType)]
pub enum ReprEnum4 {
    A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z,
}
pub fn main() {
}

438
third_party/rust/enumset/tests/ops.rs vendored Normal file
View file

@ -0,0 +1,438 @@
#![allow(dead_code)]
use enumset::*;
use std::collections::{HashSet, BTreeSet};
// Fixture enums covering the interesting derive cases: empty, single-variant,
// small (fits u32), large (needs u128), sparse discriminants, and explicit reprs.
#[derive(EnumSetType, Debug)]
pub enum EmptyEnum { }
#[derive(EnumSetType, Debug)]
pub enum Enum1 {
    A,
}
#[derive(EnumSetType, Debug)]
pub enum SmallEnum {
    A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z,
}
// 90 variants: more than 64, so the set needs a u128 representation.
#[derive(EnumSetType, Debug)]
pub enum LargeEnum {
    _00, _01, _02, _03, _04, _05, _06, _07,
    _10, _11, _12, _13, _14, _15, _16, _17,
    _20, _21, _22, _23, _24, _25, _26, _27,
    _30, _31, _32, _33, _34, _35, _36, _37,
    _40, _41, _42, _43, _44, _45, _46, _47,
    _50, _51, _52, _53, _54, _55, _56, _57,
    _60, _61, _62, _63, _64, _65, _66, _67,
    _70, _71, _72, _73, _74, _75, _76, _77,
    A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z,
}
#[derive(EnumSetType, Debug)]
pub enum Enum8 {
    A, B, C, D, E, F, G, H,
}
// Exactly 128 variants: the maximum supported.
#[derive(EnumSetType, Debug)]
pub enum Enum128 {
    A, B, C, D, E, F, G, H, _8, _9, _10, _11, _12, _13, _14, _15,
    _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31,
    _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47,
    _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63,
    _64, _65, _66, _67, _68, _69, _70, _71, _72, _73, _74, _75, _76, _77, _78, _79,
    _80, _81, _82, _83, _84, _85, _86, _87, _88, _89, _90, _91, _92, _93, _94, _95,
    _96, _97, _98, _99, _100, _101, _102, _103, _104, _105, _106, _107, _108, _109,
    _110, _111, _112, _113, _114, _115, _116, _117, _118, _119, _120, _121, _122,
    _123, _124, _125, _126, _127,
}
#[derive(EnumSetType, Debug)]
pub enum SparseEnum {
    A = 0xA, B = 20, C = 30, D = 40, E = 50, F = 60, G = 70, H = 80,
}
#[repr(u32)]
#[derive(EnumSetType, Debug)]
pub enum ReprEnum {
    A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z,
}
#[repr(u64)]
#[derive(EnumSetType, Debug)]
pub enum ReprEnum2 {
    A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z,
}
#[repr(isize)]
#[derive(EnumSetType, Debug)]
pub enum ReprEnum3 {
    A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z,
}
#[repr(C)]
#[derive(EnumSetType, Debug)]
pub enum ReprEnum4 {
    A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z,
}
// Generates a test asserting that `all()` contains every listed variant and
// that `empty()` contains none of them.
macro_rules! test_variants {
    ($enum_name:ident $all_empty_test:ident $($variant:ident,)*) => {
        #[test]
        fn $all_empty_test() {
            let all = EnumSet::<$enum_name>::all();
            let empty = EnumSet::<$enum_name>::empty();
            $(
                assert!(!empty.contains($enum_name::$variant));
                assert!(all.contains($enum_name::$variant));
            )*
        }
    }
}
test_variants! { SmallEnum small_enum_all_empty
    A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z,
}
test_variants! { LargeEnum large_enum_all_empty
    _00, _01, _02, _03, _04, _05, _06, _07,
    _10, _11, _12, _13, _14, _15, _16, _17,
    _20, _21, _22, _23, _24, _25, _26, _27,
    _30, _31, _32, _33, _34, _35, _36, _37,
    _40, _41, _42, _43, _44, _45, _46, _47,
    _50, _51, _52, _53, _54, _55, _56, _57,
    _60, _61, _62, _63, _64, _65, _66, _67,
    _70, _71, _72, _73, _74, _75, _76, _77,
    A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z,
}
// NOTE(review): SparseEnum::H is not listed here, so it is never exercised by
// this particular test — presumably an oversight in the upstream test suite.
test_variants! { SparseEnum sparse_enum_all_empty
    A, B, C, D, E, F, G,
}
// Shared test body, instantiated once per fixture enum via the `tests!` macro
// below. `$e` is the enum under test; `$mem_size` is the expected
// `size_of::<EnumSet<$e>>()` in bytes.
macro_rules! test_enum {
    ($e:ident, $mem_size:expr) => {
        const CONST_SET: EnumSet<$e> = enum_set!($e::A | $e::C);
        const CONST_1_SET: EnumSet<$e> = enum_set!($e::A);
        const EMPTY_SET: EnumSet<$e> = enum_set!();
        #[test]
        fn const_set() {
            assert_eq!(CONST_SET.len(), 2);
            assert_eq!(CONST_1_SET.len(), 1);
            assert!(CONST_SET.contains($e::A));
            assert!(CONST_SET.contains($e::C));
            assert!(EMPTY_SET.is_empty());
        }
        #[test]
        fn basic_add_remove() {
            let mut set = EnumSet::new();
            set.insert($e::A);
            set.insert($e::B);
            set.insert($e::C);
            assert_eq!(set, $e::A | $e::B | $e::C);
            set.remove($e::B);
            assert_eq!(set, $e::A | $e::C);
            set.insert($e::D);
            assert_eq!(set, $e::A | $e::C | $e::D);
            set.insert_all($e::F | $e::E | $e::G);
            assert_eq!(set, $e::A | $e::C | $e::D | $e::F | $e::E | $e::G);
            set.remove_all($e::A | $e::D | $e::G);
            assert_eq!(set, $e::C | $e::F | $e::E);
            assert!(!set.is_empty());
            set.clear();
            assert!(set.is_empty());
        }
        // `insert` must report whether the value was newly added.
        #[test]
        fn already_present_element() {
            let mut set = EnumSet::new();
            assert!(set.insert($e::A));
            assert!(!set.insert($e::A));
            set.remove($e::A);
            assert!(set.insert($e::A));
        }
        #[test]
        fn empty_is_empty() {
            assert_eq!(EnumSet::<$e>::empty().len(), 0)
        }
        #[test]
        fn all_len() {
            assert_eq!(EnumSet::<$e>::all().len(), EnumSet::<$e>::variant_count() as usize)
        }
        // Iteration must visit each element exactly once, through every
        // iteration entry point (iter, IntoIterator, collect).
        #[test]
        fn iter_test() {
            let mut set = EnumSet::new();
            set.insert($e::A);
            set.insert($e::B);
            set.extend($e::C | $e::E);
            let mut set_2 = EnumSet::new();
            let vec: Vec<_> = set.iter().collect();
            for val in vec {
                assert!(!set_2.contains(val));
                set_2.insert(val);
            }
            assert_eq!(set, set_2);
            let mut set_3 = EnumSet::new();
            for val in set {
                assert!(!set_3.contains(val));
                set_3.insert(val);
            }
            assert_eq!(set, set_3);
            let mut set_4 = EnumSet::new();
            let vec: EnumSet<_> = set.into_iter().map(EnumSet::only).collect();
            for val in vec {
                assert!(!set_4.contains(val));
                set_4.insert(val);
            }
            assert_eq!(set, set_4);
            let mut set_5 = EnumSet::new();
            let vec: EnumSet<_> = set.iter().collect();
            for val in vec {
                assert!(!set_5.contains(val));
                set_5.insert(val);
            }
            assert_eq!(set, set_5);
        }
        // size_hint/len must stay exact as the iterator is consumed.
        fn check_iter_size_hint(set: EnumSet<$e>) {
            let count = set.len();
            let mut itr = set.iter();
            for idx in 0 .. count {
                assert_eq!(itr.size_hint(), (count-idx, Some(count-idx)));
                assert_eq!(itr.len(), count-idx);
                assert!(itr.next().is_some());
            }
            assert_eq!(itr.size_hint(), (0, Some(0)));
            assert_eq!(itr.len(), 0);
        }
        #[test]
        fn test_iter_size_hint() {
            check_iter_size_hint(EnumSet::<$e>::all());
            let mut set = EnumSet::new();
            set.insert($e::A);
            set.insert($e::C);
            set.insert($e::E);
            check_iter_size_hint(set);
        }
        #[test]
        fn iter_ops_test() {
            let set = $e::A | $e::B | $e::C | $e::E;
            let set2 = set.iter().filter(|&v| v != $e::B).collect::<EnumSet<_>>();
            assert_eq!(set2, $e::A | $e::C | $e::E);
        }
        #[test]
        fn basic_ops_test() {
            assert_eq!(($e::A | $e::B) | ($e::B | $e::C), $e::A | $e::B | $e::C);
            assert_eq!(($e::A | $e::B) & ($e::B | $e::C), $e::B);
            assert_eq!(($e::A | $e::B) ^ ($e::B | $e::C), $e::A | $e::C);
            assert_eq!(($e::A | $e::B) - ($e::B | $e::C), $e::A);
            assert_eq!($e::A | !$e::A, EnumSet::<$e>::all());
        }
        #[test]
        fn mutable_ops_test() {
            let mut set = $e::A | $e::B;
            assert_eq!(set, $e::A | $e::B);
            set |= $e::C | $e::D;
            assert_eq!(set, $e::A | $e::B | $e::C | $e::D);
            set -= $e::C;
            assert_eq!(set, $e::A | $e::B | $e::D);
            set ^= $e::B | $e::E;
            assert_eq!(set, $e::A | $e::D | $e::E);
            set &= $e::A | $e::E | $e::F;
            assert_eq!(set, $e::A | $e::E);
        }
        #[test]
        fn basic_set_status() {
            assert!(($e::A | $e::B | $e::C).is_disjoint($e::D | $e::E | $e::F));
            assert!(!($e::A | $e::B | $e::C | $e::D).is_disjoint($e::D | $e::E | $e::F));
            assert!(($e::A | $e::B).is_subset($e::A | $e::B | $e::C));
            assert!(!($e::A | $e::D).is_subset($e::A | $e::B | $e::C));
        }
        #[test]
        fn debug_impl() {
            assert_eq!(format!("{:?}", $e::A | $e::B | $e::D), "EnumSet(A | B | D)");
        }
        #[test]
        fn to_from_bits() {
            let value = $e::A | $e::C | $e::D | $e::F | $e::E | $e::G;
            assert_eq!(EnumSet::from_u128(value.as_u128()), value);
        }
        // `from_u128` must panic on bits with no matching variant; for a full
        // 128-variant enum every bit is valid, so the test panics manually to
        // satisfy `should_panic`.
        #[test]
        #[should_panic]
        fn too_many_bits() {
            if EnumSet::<$e>::variant_count() == 128 {
                panic!("(test skipped)")
            }
            EnumSet::<$e>::from_u128(!0);
        }
        #[test]
        fn match_const_test() {
            match CONST_SET {
                CONST_SET => { /* ok */ }
                _ => panic!("match fell through?"),
            }
        }
        // EnumSet must be usable as an element of std hash/ordered sets.
        #[test]
        fn set_test() {
            const SET_TEST_A: EnumSet<$e> = enum_set!($e::A | $e::B | $e::C);
            const SET_TEST_B: EnumSet<$e> = enum_set!($e::A | $e::B | $e::D);
            const SET_TEST_C: EnumSet<$e> = enum_set!($e::A | $e::B | $e::E);
            const SET_TEST_D: EnumSet<$e> = enum_set!($e::A | $e::B | $e::F);
            const SET_TEST_E: EnumSet<$e> = enum_set!($e::A | $e::B | $e::G);
            macro_rules! test_set {
                ($set:ident) => {{
                    assert!(!$set.contains(&SET_TEST_A));
                    assert!(!$set.contains(&SET_TEST_B));
                    assert!(!$set.contains(&SET_TEST_C));
                    assert!(!$set.contains(&SET_TEST_D));
                    assert!(!$set.contains(&SET_TEST_E));
                    $set.insert(SET_TEST_A);
                    $set.insert(SET_TEST_C);
                    assert!($set.contains(&SET_TEST_A));
                    assert!(!$set.contains(&SET_TEST_B));
                    assert!($set.contains(&SET_TEST_C));
                    assert!(!$set.contains(&SET_TEST_D));
                    assert!(!$set.contains(&SET_TEST_E));
                    $set.remove(&SET_TEST_C);
                    $set.remove(&SET_TEST_D);
                    assert!($set.contains(&SET_TEST_A));
                    assert!(!$set.contains(&SET_TEST_B));
                    assert!(!$set.contains(&SET_TEST_C));
                    assert!(!$set.contains(&SET_TEST_D));
                    assert!(!$set.contains(&SET_TEST_E));
                    $set.insert(SET_TEST_A);
                    $set.insert(SET_TEST_D);
                    assert!($set.contains(&SET_TEST_A));
                    assert!(!$set.contains(&SET_TEST_B));
                    assert!(!$set.contains(&SET_TEST_C));
                    assert!($set.contains(&SET_TEST_D));
                    assert!(!$set.contains(&SET_TEST_E));
                }}
            }
            let mut hash_set = HashSet::new();
            test_set!(hash_set);
            let mut tree_set = BTreeSet::new();
            test_set!(tree_set);
        }
        // All four Sum impls (by value/reference, of sets/variants).
        #[test]
        fn sum_test() {
            let target = $e::A | $e::B | $e::D | $e::E | $e::G | $e::H;
            let list_a = [$e::A | $e::B, $e::D | $e::E, $e::G | $e::H];
            let sum_a: EnumSet<$e> = list_a.iter().map(|x| *x).sum();
            assert_eq!(target, sum_a);
            let sum_b: EnumSet<$e> = list_a.iter().sum();
            assert_eq!(target, sum_b);
            let list_b = [$e::A, $e::B, $e::D, $e::E, $e::G, $e::H];
            let sum_c: EnumSet<$e> = list_b.iter().map(|x| *x).sum();
            assert_eq!(target, sum_c);
            let sum_d: EnumSet<$e> = list_b.iter().sum();
            assert_eq!(target, sum_d);
        }
        // Pins the derive's choice of underlying representation.
        #[test]
        fn check_size() {
            assert_eq!(::std::mem::size_of::<EnumSet<$e>>(), $mem_size);
        }
    }
}
// Wraps a group of generated tests in its own module so each fixture enum gets
// an isolated namespace.
macro_rules! tests {
    ($m:ident, $($tt:tt)*) => { mod $m { use super::*; $($tt)*; } }
}
// Second argument is the expected `size_of::<EnumSet<_>>()` in bytes: u32 for
// up-to-32-variant enums, u128 (16 bytes) for the large ones, u8 for Enum8.
tests!(small_enum, test_enum!(SmallEnum, 4));
tests!(large_enum, test_enum!(LargeEnum, 16));
tests!(enum8, test_enum!(Enum8, 1));
tests!(enum128, test_enum!(Enum128, 16));
tests!(sparse_enum, test_enum!(SparseEnum, 16));
tests!(repr_enum_u32, test_enum!(ReprEnum, 4));
tests!(repr_enum_u64, test_enum!(ReprEnum2, 4));
tests!(repr_enum_isize, test_enum!(ReprEnum3, 4));
tests!(repr_enum_c, test_enum!(ReprEnum4, 4));
// Each `U*` variant occupies the lowest bit that still requires at least that
// integer width (bit 0 fits u8; bit 8 forces u16; bit 16 forces u32; ...), so
// the bits_tests below can build masks that just fit — or just overflow — a
// given conversion type.
#[derive(EnumSetType, Debug)]
pub enum ThresholdEnum {
    A = 1, B, C, D,
    U8 = 0, U16 = 8, U32 = 16, U64 = 32, U128 = 64,
}
// Generates conversion tests for one integer width. `$threshold_expr` is a
// variant whose mask fits the type; each `$too_big_expr` is a variant whose
// bit position overflows it.
macro_rules! bits_tests {
    (
        $mod_name:ident, $threshold_expr:expr, ($($too_big_expr:expr),*), $ty:ty,
        $to:ident $try_to:ident $to_truncated:ident
        $from:ident $try_from:ident $from_truncated:ident
    ) => {
        mod $mod_name {
            use super::*;
            use crate::ThresholdEnum::*;
            // Round-trip: to-bits followed by from-bits is the identity, and
            // the truncated/try variants agree when everything fits.
            #[test]
            fn to_from_basic() {
                for &mask in &[
                    $threshold_expr | B | C | D,
                    $threshold_expr | A | D,
                    $threshold_expr | B | C,
                ] {
                    assert_eq!(mask, EnumSet::<ThresholdEnum>::$from(mask.$to()));
                    assert_eq!(mask.$to_truncated(), mask.$to());
                    assert_eq!(Some(mask.$to()), mask.$try_to())
                }
            }
            // Bit 7 does not correspond to any ThresholdEnum variant.
            #[test]
            #[should_panic]
            fn from_invalid() {
                let invalid_mask: $ty = 0x80;
                EnumSet::<ThresholdEnum>::$from(invalid_mask);
            }
            #[test]
            fn try_from_invalid() {
                assert!(EnumSet::<ThresholdEnum>::$try_from(0xFF).is_none());
            }
            $(
                #[test]
                fn try_to_overflow() {
                    let set: EnumSet<ThresholdEnum> = $too_big_expr.into();
                    assert!(set.$try_to().is_none());
                }
            )*
            // Truncating conversions silently drop out-of-range bits.
            #[test]
            fn truncated_overflow() {
                let trunc_invalid = EnumSet::<ThresholdEnum>::$from_truncated(0xFE);
                assert_eq!(A | B | C | D, trunc_invalid);
                $(
                    let set: EnumSet<ThresholdEnum> = $too_big_expr | A;
                    assert_eq!(2, set.$to_truncated());
                )*
            }
        }
    }
}
// Instantiate the conversion tests for every supported integer width.
// u128 has no "too big" variant (nothing overflows it); usize is tested with
// the u32 threshold since it is at least 32 bits on supported targets.
bits_tests!(test_u8_bits, U8, (U16), u8,
    as_u8 try_as_u8 as_u8_truncated from_u8 try_from_u8 from_u8_truncated);
bits_tests!(test_u16_bits, U16, (U32), u16,
    as_u16 try_as_u16 as_u16_truncated from_u16 try_from_u16 from_u16_truncated);
bits_tests!(test_u32_bits, U32, (U64), u32,
    as_u32 try_as_u32 as_u32_truncated from_u32 try_from_u32 from_u32_truncated);
bits_tests!(test_u64_bits, U64, (U128), u64,
    as_u64 try_as_u64 as_u64_truncated from_u64 try_from_u64 from_u64_truncated);
bits_tests!(test_u128_bits, U128, (), u128,
    as_u128 try_as_u128 as_u128_truncated from_u128 try_from_u128 from_u128_truncated);
bits_tests!(test_uize_bits, U32, (U128), usize,
    as_usize try_as_usize as_usize_truncated
    from_usize try_from_usize from_usize_truncated);

90
third_party/rust/enumset/tests/serde.rs vendored Normal file
View file

@ -0,0 +1,90 @@
#![cfg(feature = "serde")]
#![allow(dead_code)]
use enumset::*;
use serde_derive::*;
// Test resistance against shadowed types.
// These aliases shadow the prelude's `Some`/`None`/`Result`, so the derive
// macro's generated code must use fully-qualified paths to compile here.
type Some = ();
type None = ();
type Result = ();
// Serialized as a list of variant names rather than a bitmask.
#[derive(Serialize, Deserialize, EnumSetType, Debug)]
#[enumset(serialize_as_list)]
#[serde(crate="serde2")]
pub enum ListEnum {
    A, B, C, D, E, F, G, H,
}
// Serialized as a u128 bitmask; unknown bits are dropped on deserialize.
#[derive(EnumSetType, Debug)]
#[enumset(serialize_repr = "u128")]
pub enum ReprEnum {
    A, B, C, D, E, F, G, H,
}
// Serialized as a u128 bitmask; unknown bits make deserialization fail.
#[derive(EnumSetType, Debug)]
#[enumset(serialize_repr = "u128", serialize_deny_unknown)]
pub enum DenyUnknownEnum {
    A, B, C, D, E, F, G, H,
}
// Generates round-trip serialization tests (bincode and JSON) for the enum
// `$e`. `$ser_size` is the expected bincode payload length in bytes, or `!0`
// to skip the length check (used for the variable-length list format).
macro_rules! serde_test_simple {
    ($e:ident, $ser_size:expr) => {
        #[test]
        fn serialize_deserialize_test_bincode() {
            let value = $e::A | $e::C | $e::D | $e::F | $e::E | $e::G;
            let serialized = bincode::serialize(&value).unwrap();
            let deserialized = bincode::deserialize::<EnumSet<$e>>(&serialized).unwrap();
            assert_eq!(value, deserialized);
            if $ser_size != !0 {
                assert_eq!(serialized.len(), $ser_size);
            }
        }
        #[test]
        fn serialize_deserialize_test_json() {
            let value = $e::A | $e::C | $e::D | $e::F | $e::E | $e::G;
            let serialized = serde_json::to_string(&value).unwrap();
            let deserialized = serde_json::from_str::<EnumSet<$e>>(&serialized).unwrap();
            assert_eq!(value, deserialized);
        }
    }
}
// Extends `serde_test_simple!` with a check that unknown bits are silently
// dropped on deserialize (only valid for bitmask-serialized enums that do not
// use `serialize_deny_unknown`).
macro_rules! serde_test {
    ($e:ident, $ser_size:expr) => {
        serde_test_simple!($e, $ser_size);
        #[test]
        fn deserialize_all_test() {
            let serialized = bincode::serialize(&!0u128).unwrap();
            let deserialized = bincode::deserialize::<EnumSet<$e>>(&serialized).unwrap();
            assert_eq!(EnumSet::<$e>::all(), deserialized);
        }
    }
}
// Wraps a list of test items in a named module so the same macro-generated
// test suite can be instantiated once per enum type without name collisions.
macro_rules! tests {
    ($m:ident, $($tt:tt)*) => { mod $m { use super::*; $($tt)*; } }
}
// With `serialize_deny_unknown`, bits that map to no variant must be
// rejected rather than silently dropped.
#[test]
fn test_deny_unknown() {
    // All 128 bits set — includes bits for variants that don't exist.
    let all_bits = bincode::serialize(&!0u128).unwrap();
    let result = bincode::deserialize::<EnumSet<DenyUnknownEnum>>(&all_bits);
    assert!(result.is_err());
}
// Checks both JSON representations: a list of variant names for
// `serialize_as_list` enums, and a plain integer bitmask otherwise.
#[test]
fn test_json_reprs() {
    // List-serialized enums round-trip through a JSON array of names.
    let list_set = ListEnum::A | ListEnum::C | ListEnum::F;
    assert_eq!(list_set,
               serde_json::from_str::<EnumSet<ListEnum>>(r#"["A","C","F"]"#).unwrap());
    assert_eq!(r#"["A","C","F"]"#, serde_json::to_string(&list_set).unwrap());
    // Bitmask-serialized enums round-trip through a JSON integer
    // (A|C|D == 0b1101 == 13).
    let repr_set = ReprEnum::A | ReprEnum::C | ReprEnum::D;
    assert_eq!(repr_set, serde_json::from_str::<EnumSet<ReprEnum>>("13").unwrap());
    assert_eq!("13", serde_json::to_string(&repr_set).unwrap());
}
// Instantiate the serde suites: 16 is the expected bincode length in bytes of
// a u128 bitmask; `!0` skips the length check for the variable-length list.
tests!(list_enum, serde_test_simple!(ListEnum, !0));
tests!(repr_enum, serde_test!(ReprEnum, 16));
tests!(deny_unknown_enum, serde_test_simple!(DenyUnknownEnum, 16));

View file

@ -0,0 +1,7 @@
// Compile-time UI tests via trybuild; nightly-only because the expected
// compiler diagnostics are pinned to a nightly toolchain.
#[rustversion::nightly]
#[test]
fn ui() {
    let cases = trybuild::TestCases::new();
    cases.compile_fail("tests/compile-fail/*.rs");
    cases.pass("tests/compile-pass/*.rs");
}

View file

@ -0,0 +1 @@
{"files":{"Cargo.toml":"c05e0ce6b5442f5df966327cf7c296b966c5ebb676cc8805aade8d033c3bf592","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"79b4502d93c23afe2765054a80d03716d4934eb260cdfbe8c401898df3aa5a8f","README.md":"d45f82ad73b39aad91f85d0688bc6a94402293c40c295d0cab7b2a9b7225677b","src/lib.rs":"62abb9c0a46e00e2532b41c3302403c6f48dfe71983140311285ede01bb006e1"},"package":"6451128aa6655d880755345d085494cf7561a6bee7c8dc821e5d77e6d267ecd4"}

View file

@ -0,0 +1,38 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
edition = "2018"
name = "enumset_derive"
version = "0.5.5"
authors = ["Alissa Rao <lymia@lymiahugs.com>"]
description = "An internal helper crate for enumset. Not public API."
documentation = "https://lymia.moe/doc/enumset/enumset/"
license = "MIT/Apache-2.0"
repository = "https://github.com/Lymia/enumset"
[lib]
proc-macro = true
[dependencies.darling]
version = "0.13.0"
[dependencies.proc-macro2]
version = "1"
[dependencies.quote]
version = "1"
[dependencies.syn]
version = "1"
[features]
serde = []

View file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -0,0 +1,26 @@
Copyright (c) 2017-2020 Alissa Rao <lymiahugs@gmail.com>
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

View file

@ -0,0 +1 @@
An internal helper crate for [enumset](https://github.com/Lymia/enumset). Not public API.

View file

@ -0,0 +1,525 @@
#![recursion_limit="256"]
extern crate proc_macro;
use darling::*;
use proc_macro::TokenStream;
use proc_macro2::{TokenStream as SynTokenStream, Literal, Span};
use std::collections::HashSet;
use syn::{*, Result, Error};
use syn::spanned::Spanned;
use quote::*;
/// Helper function for emitting compile errors.
fn error<T>(span: Span, message: &str) -> Result<T> {
Err(Error::new(span, message))
}
/// Decodes the custom attributes for our custom derive.
///
/// Parsed by darling from the `#[enumset(...)]` attribute on the enum.
#[derive(FromDeriveInput, Default)]
#[darling(attributes(enumset), default)]
struct EnumsetAttrs {
    // Suppress generation of the operator overloads (`|`, `&`, `^`, `-`, `!`).
    no_ops: bool,
    // Serialize the set as a list of variants instead of an integer bitmask.
    serialize_as_list: bool,
    // Error (rather than silently drop) unknown bits when deserializing.
    serialize_deny_unknown: bool,
    // Integer type name to serialize the bitmask as (e.g. "u128").
    #[darling(default)]
    serialize_repr: Option<String>,
    // Override for the path of the `enumset` crate in generated code.
    #[darling(default)]
    crate_name: Option<String>,
}
/// A variant in the enum set type.
struct EnumSetValue {
    /// The name of the variant.
    name: Ident,
    /// The discriminant of the variant.
    variant_repr: u32,
}
/// Stores information about the enum set type.
///
/// Built up incrementally by `EnumSetInfo::new` and `push_variant`, then
/// consumed by `enum_set_type_impl` to generate the trait impls.
#[allow(dead_code)]
struct EnumSetInfo {
    /// The name of the enum.
    name: Ident,
    /// The crate name to use.
    crate_name: Option<Ident>,
    /// The numeric type to serialize the enum as.
    explicit_serde_repr: Option<Ident>,
    /// Whether the underlying repr of the enum supports negative values.
    has_signed_repr: bool,
    /// Whether the underlying repr of the enum supports values higher than 2^32.
    has_large_repr: bool,
    /// A list of variants in the enum.
    variants: Vec<EnumSetValue>,
    /// The highest encountered variant discriminant.
    max_discrim: u32,
    /// The current variant discriminant. Used to track, e.g. `A=10,B,C`.
    cur_discrim: u32,
    /// A list of variant names that are already in use.
    used_variant_names: HashSet<String>,
    /// A list of variant discriminants that are already in use.
    used_discriminants: HashSet<u32>,
    /// Avoid generating operator overloads on the enum type.
    no_ops: bool,
    /// Serialize the enum as a list.
    serialize_as_list: bool,
    /// Disallow unknown bits while deserializing the enum.
    serialize_deny_unknown: bool,
}
impl EnumSetInfo {
    /// Creates a fresh `EnumSetInfo` from the derive input and its parsed
    /// `#[enumset(...)]` attributes. Variant data is filled in later by
    /// `push_variant`.
    fn new(input: &DeriveInput, attrs: EnumsetAttrs) -> EnumSetInfo {
        EnumSetInfo {
            name: input.ident.clone(),
            crate_name: attrs.crate_name.map(|x| Ident::new(&x, Span::call_site())),
            explicit_serde_repr: attrs.serialize_repr.map(|x| Ident::new(&x, Span::call_site())),
            has_signed_repr: false,
            has_large_repr: false,
            variants: Vec::new(),
            max_discrim: 0,
            cur_discrim: 0,
            used_variant_names: HashSet::new(),
            used_discriminants: HashSet::new(),
            no_ops: attrs.no_ops,
            serialize_as_list: attrs.serialize_as_list,
            serialize_deny_unknown: attrs.serialize_deny_unknown
        }
    }
    /// Sets an explicit repr for the enumset.
    ///
    /// Rejects unsupported reprs; otherwise records whether the repr allows
    /// signed or >32-bit discriminants so later errors can be more specific.
    fn push_explicit_repr(&mut self, attr_span: Span, repr: &str) -> Result<()> {
        // Check whether the repr is supported, and if so, set some flags for better error
        // messages later on.
        match repr {
            "Rust" | "C" | "u8" | "u16" | "u32" => Ok(()),
            "usize" | "u64" | "u128" => {
                self.has_large_repr = true;
                Ok(())
            }
            "i8" | "i16" | "i32" => {
                self.has_signed_repr = true;
                Ok(())
            }
            "isize" | "i64" | "i128" => {
                self.has_signed_repr = true;
                self.has_large_repr = true;
                Ok(())
            }
            _ => error(attr_span, "Unsupported repr.")
        }
    }
    /// Adds a variant to the enumset, validating its shape, name, and
    /// discriminant (must be a unit variant with a unique `u32` value < 128).
    fn push_variant(&mut self, variant: &Variant) -> Result<()> {
        if self.used_variant_names.contains(&variant.ident.to_string()) {
            error(variant.span(), "Duplicated variant name.")
        } else if let Fields::Unit = variant.fields {
            // Parse the discriminant.
            if let Some((_, expr)) = &variant.discriminant {
                // Tailor the error message to what the declared repr would
                // have allowed. (Fixes the "discrimiants" typo, and reports
                // "larger or negative" when the repr permits both, instead of
                // leaving that wording in an unreachable branch.)
                let discriminant_fail_message = format!(
                    "Enum set discriminants must be `u32`s.{}",
                    if self.has_signed_repr || self.has_large_repr {
                        format!(
                            " ({} discriminants are still unsupported with reprs that allow them.)",
                            if self.has_large_repr && self.has_signed_repr {
                                "larger or negative"
                            } else if self.has_large_repr {
                                "larger"
                            } else {
                                "negative"
                            }
                        )
                    } else {
                        String::new()
                    },
                );
                if let Expr::Lit(ExprLit { lit: Lit::Int(i), .. }) = expr {
                    match i.base10_parse() {
                        Ok(val) => self.cur_discrim = val,
                        Err(_) => error(expr.span(), &discriminant_fail_message)?,
                    }
                } else {
                    error(variant.span(), &discriminant_fail_message)?;
                }
            }
            // Validate the discriminant.
            let discriminant = self.cur_discrim;
            if discriminant >= 128 {
                let message = if self.variants.len() <= 127 {
                    "`#[derive(EnumSetType)]` currently only supports discriminants up to 127."
                } else {
                    "`#[derive(EnumSetType)]` currently only supports enums up to 128 variants."
                };
                error(variant.span(), message)?;
            }
            if self.used_discriminants.contains(&discriminant) {
                error(variant.span(), "Duplicated enum discriminant.")?;
            }
            // Add the variant to the info. `cur_discrim` advances so an
            // unannotated variant after `A = 10` becomes 11, as in plain Rust.
            self.cur_discrim += 1;
            if discriminant > self.max_discrim {
                self.max_discrim = discriminant;
            }
            self.variants.push(EnumSetValue {
                name: variant.ident.clone(),
                variant_repr: discriminant,
            });
            self.used_variant_names.insert(variant.ident.to_string());
            self.used_discriminants.insert(discriminant);
            Ok(())
        } else {
            error(variant.span(), "`#[derive(EnumSetType)]` can only be used on fieldless enums.")
        }
    }
    /// Validate the enumset type.
    ///
    /// Errors if an explicit serde repr is unsupported or too narrow to hold
    /// every variant's bit.
    fn validate(&self) -> Result<()> {
        // Check if all bits of the bitset can fit in the serialization representation.
        if let Some(explicit_serde_repr) = &self.explicit_serde_repr {
            let is_overflowed = match explicit_serde_repr.to_string().as_str() {
                "u8" => self.max_discrim >= 8,
                "u16" => self.max_discrim >= 16,
                "u32" => self.max_discrim >= 32,
                "u64" => self.max_discrim >= 64,
                "u128" => self.max_discrim >= 128,
                _ => error(
                    Span::call_site(),
                    "Only `u8`, `u16`, `u32`, `u64` and `u128` are supported for serde_repr."
                )?,
            };
            if is_overflowed {
                error(Span::call_site(), "serialize_repr cannot be smaller than bitset.")?;
            }
        }
        Ok(())
    }
    /// Computes the underlying type used to store the enumset: the smallest
    /// unsigned integer wide enough for the largest discriminant's bit.
    fn enumset_repr(&self) -> SynTokenStream {
        if self.max_discrim <= 7 {
            quote! { u8 }
        } else if self.max_discrim <= 15 {
            quote! { u16 }
        } else if self.max_discrim <= 31 {
            quote! { u32 }
        } else if self.max_discrim <= 63 {
            quote! { u64 }
        } else if self.max_discrim <= 127 {
            quote! { u128 }
        } else {
            // `push_variant` rejects discriminants >= 128, so this can't be hit.
            panic!("max_variant > 127?")
        }
    }
    /// Computes the underlying type used to serialize the enumset.
    #[cfg(feature = "serde")]
    fn serde_repr(&self) -> SynTokenStream {
        if let Some(serde_repr) = &self.explicit_serde_repr {
            quote! { #serde_repr }
        } else {
            self.enumset_repr()
        }
    }
    /// Returns a bitmask of all variants in the set.
    fn all_variants(&self) -> u128 {
        let mut accum = 0u128;
        for variant in &self.variants {
            assert!(variant.variant_repr <= 127);
            accum |= 1u128 << variant.variant_repr as u128;
        }
        accum
    }
}
/// Generates the actual `EnumSetType` impl.
///
/// Emits: the private `EnumSetTypePrivate` impl (repr, bitmask, u32
/// conversions, serde hooks), the public `EnumSetType` marker impl, manual
/// `PartialEq`/`Eq`/`Clone`/`Copy` impls, hidden const helpers for the
/// `enum_set!` macro, and (unless `no_ops`) operator overloads on the enum.
fn enum_set_type_impl(info: EnumSetInfo) -> SynTokenStream {
    let name = &info.name;
    // Path of the enumset crate in generated code (overridable via attribute).
    let enumset = match &info.crate_name {
        Some(crate_name) => quote!(::#crate_name),
        None => quote!(::enumset),
    };
    let typed_enumset = quote!(#enumset::EnumSet<#name>);
    // Re-exported `core` so generated code survives shadowed prelude names.
    let core = quote!(#enumset::__internal::core_export);
    let repr = info.enumset_repr();
    let all_variants = Literal::u128_unsuffixed(info.all_variants());
    // Operator overloads on the enum itself (`A | B` yields an EnumSet),
    // suppressed by `#[enumset(no_ops)]`.
    let ops = if info.no_ops {
        quote! {}
    } else {
        quote! {
            impl <O : Into<#typed_enumset>> #core::ops::Sub<O> for #name {
                type Output = #typed_enumset;
                fn sub(self, other: O) -> Self::Output {
                    #enumset::EnumSet::only(self) - other.into()
                }
            }
            impl <O : Into<#typed_enumset>> #core::ops::BitAnd<O> for #name {
                type Output = #typed_enumset;
                fn bitand(self, other: O) -> Self::Output {
                    #enumset::EnumSet::only(self) & other.into()
                }
            }
            impl <O : Into<#typed_enumset>> #core::ops::BitOr<O> for #name {
                type Output = #typed_enumset;
                fn bitor(self, other: O) -> Self::Output {
                    #enumset::EnumSet::only(self) | other.into()
                }
            }
            impl <O : Into<#typed_enumset>> #core::ops::BitXor<O> for #name {
                type Output = #typed_enumset;
                fn bitxor(self, other: O) -> Self::Output {
                    #enumset::EnumSet::only(self) ^ other.into()
                }
            }
            impl #core::ops::Not for #name {
                type Output = #typed_enumset;
                fn not(self) -> Self::Output {
                    !#enumset::EnumSet::only(self)
                }
            }
            impl #core::cmp::PartialEq<#typed_enumset> for #name {
                fn eq(&self, other: &#typed_enumset) -> bool {
                    #enumset::EnumSet::only(*self) == *other
                }
            }
        }
    };
    // serde support: either a list of variants or an integer bitmask,
    // depending on the `serialize_as_list` attribute.
    #[cfg(feature = "serde")]
    let serde = quote!(#enumset::__internal::serde);
    #[cfg(feature = "serde")]
    let serde_ops = if info.serialize_as_list {
        let expecting_str = format!("a list of {}", name);
        quote! {
            fn serialize<S: #serde::Serializer>(
                set: #enumset::EnumSet<#name>, ser: S,
            ) -> #core::result::Result<S::Ok, S::Error> {
                use #serde::ser::SerializeSeq;
                let mut seq = ser.serialize_seq(#core::prelude::v1::Some(set.len()))?;
                for bit in set {
                    seq.serialize_element(&bit)?;
                }
                seq.end()
            }
            fn deserialize<'de, D: #serde::Deserializer<'de>>(
                de: D,
            ) -> #core::result::Result<#enumset::EnumSet<#name>, D::Error> {
                struct Visitor;
                impl <'de> #serde::de::Visitor<'de> for Visitor {
                    type Value = #enumset::EnumSet<#name>;
                    fn expecting(
                        &self, formatter: &mut #core::fmt::Formatter,
                    ) -> #core::fmt::Result {
                        write!(formatter, #expecting_str)
                    }
                    fn visit_seq<A>(
                        mut self, mut seq: A,
                    ) -> #core::result::Result<Self::Value, A::Error> where
                        A: #serde::de::SeqAccess<'de>
                    {
                        let mut accum = #enumset::EnumSet::<#name>::new();
                        while let #core::prelude::v1::Some(val) = seq.next_element::<#name>()? {
                            accum |= val;
                        }
                        #core::prelude::v1::Ok(accum)
                    }
                }
                de.deserialize_seq(Visitor)
            }
        }
    } else {
        let serialize_repr = info.serde_repr();
        // With `serialize_deny_unknown`, reject any set bit with no variant;
        // otherwise unknown bits are masked off below.
        let check_unknown = if info.serialize_deny_unknown {
            quote! {
                if value & !#all_variants != 0 {
                    use #serde::de::Error;
                    return #core::prelude::v1::Err(
                        D::Error::custom("enumset contains unknown bits")
                    )
                }
            }
        } else {
            quote! { }
        };
        quote! {
            fn serialize<S: #serde::Serializer>(
                set: #enumset::EnumSet<#name>, ser: S,
            ) -> #core::result::Result<S::Ok, S::Error> {
                #serde::Serialize::serialize(&(set.__priv_repr as #serialize_repr), ser)
            }
            fn deserialize<'de, D: #serde::Deserializer<'de>>(
                de: D,
            ) -> #core::result::Result<#enumset::EnumSet<#name>, D::Error> {
                let value = <#serialize_repr as #serde::Deserialize>::deserialize(de)?;
                #check_unknown
                #core::prelude::v1::Ok(#enumset::EnumSet {
                    __priv_repr: (value & #all_variants) as #repr,
                })
            }
        }
    };
    #[cfg(not(feature = "serde"))]
    let serde_ops = quote! { };
    // Degenerate cases: no variants (nothing to convert), or exactly one
    // variant (a zero-sized type whose only value is that variant).
    let is_uninhabited = info.variants.is_empty();
    let is_zst = info.variants.len() == 1;
    let into_impl = if is_uninhabited {
        quote! {
            fn enum_into_u32(self) -> u32 {
                panic!(concat!(stringify!(#name), " is uninhabited."))
            }
            unsafe fn enum_from_u32(val: u32) -> Self {
                panic!(concat!(stringify!(#name), " is uninhabited."))
            }
        }
    } else if is_zst {
        let variant = &info.variants[0].name;
        quote! {
            fn enum_into_u32(self) -> u32 {
                self as u32
            }
            unsafe fn enum_from_u32(val: u32) -> Self {
                #name::#variant
            }
        }
    } else {
        let variant_name: Vec<_> = info.variants.iter().map(|x| &x.name).collect();
        let variant_value: Vec<_> = info.variants.iter().map(|x| x.variant_repr).collect();
        let const_field: Vec<_> = ["IS_U8", "IS_U16", "IS_U32", "IS_U64", "IS_U128"]
            .iter().map(|x| Ident::new(x, Span::call_site())).collect();
        let int_type: Vec<_> = ["u8", "u16", "u32", "u64", "u128"]
            .iter().map(|x| Ident::new(x, Span::call_site())).collect();
        quote! {
            fn enum_into_u32(self) -> u32 {
                self as u32
            }
            unsafe fn enum_from_u32(val: u32) -> Self {
                // We put these in const fields so the branches they guard aren't generated even
                // on -O0
                #(const #const_field: bool =
                    #core::mem::size_of::<#name>() == #core::mem::size_of::<#int_type>();)*
                match val {
                    // Every valid variant value has an explicit branch. If they get optimized out,
                    // great. If the representation has changed somehow, and they don't, oh well,
                    // there's still no UB.
                    #(#variant_value => #name::#variant_name,)*
                    // Helps hint to the LLVM that this is a transmute. Note that this branch is
                    // still unreachable.
                    #(x if #const_field => {
                        let x = x as #int_type;
                        *(&x as *const _ as *const #name)
                    })*
                    // Default case. Sometimes causes LLVM to generate a table instead of a simple
                    // transmute, but, oh well.
                    _ => #core::hint::unreachable_unchecked(),
                }
            }
        }
    };
    let eq_impl = if is_uninhabited {
        quote!(panic!(concat!(stringify!(#name), " is uninhabited.")))
    } else {
        quote!((*self as u32) == (*other as u32))
    };
    // used in the enum_set! macro `const fn`s.
    let self_as_repr_mask = if is_uninhabited {
        quote! { 0 } // impossible anyway
    } else {
        quote! { 1 << self as #repr }
    };
    quote! {
        unsafe impl #enumset::__internal::EnumSetTypePrivate for #name {
            type Repr = #repr;
            const ALL_BITS: Self::Repr = #all_variants;
            #into_impl
            #serde_ops
        }
        unsafe impl #enumset::EnumSetType for #name { }
        impl #core::cmp::PartialEq for #name {
            fn eq(&self, other: &Self) -> bool {
                #eq_impl
            }
        }
        impl #core::cmp::Eq for #name { }
        impl #core::clone::Clone for #name {
            fn clone(&self) -> Self {
                *self
            }
        }
        impl #core::marker::Copy for #name { }
        impl #name {
            /// Creates a new enumset with only this variant.
            #[deprecated(note = "This method is an internal implementation detail generated by \
                                 the `enumset` crate's procedural macro. It should not be used \
                                 directly. Use `EnumSet::only` instead.")]
            #[doc(hidden)]
            pub const fn __impl_enumset_internal__const_only(self) -> #enumset::EnumSet<#name> {
                #enumset::EnumSet { __priv_repr: #self_as_repr_mask }
            }
            /// Creates a new enumset with this variant added.
            #[deprecated(note = "This method is an internal implementation detail generated by \
                                 the `enumset` crate's procedural macro. It should not be used \
                                 directly. Use the `|` operator instead.")]
            #[doc(hidden)]
            pub const fn __impl_enumset_internal__const_merge(
                self, chain: #enumset::EnumSet<#name>,
            ) -> #enumset::EnumSet<#name> {
                #enumset::EnumSet { __priv_repr: chain.__priv_repr | #self_as_repr_mask }
            }
        }
        #ops
    }
}
/// A wrapper that parses the input enum.
///
/// Entry point for `#[derive(EnumSetType)]`: parses the derive input and its
/// `#[enumset(...)]` attributes, then delegates to `derive_enum_set_type_0`,
/// turning any failure into a compile-error token stream.
#[proc_macro_derive(EnumSetType, attributes(enumset))]
pub fn derive_enum_set_type(input: TokenStream) -> TokenStream {
    let derive_input: DeriveInput = parse_macro_input!(input);
    let parsed_attrs = match EnumsetAttrs::from_derive_input(&derive_input) {
        Err(e) => return e.write_errors().into(),
        Ok(attrs) => attrs,
    };
    derive_enum_set_type_0(derive_input, parsed_attrs)
        .unwrap_or_else(|e| e.to_compile_error().into())
}
fn derive_enum_set_type_0(input: DeriveInput, attrs: EnumsetAttrs) -> Result<TokenStream> {
if !input.generics.params.is_empty() {
error(
input.generics.span(),
"`#[derive(EnumSetType)]` cannot be used on enums with type parameters.",
)
} else if let Data::Enum(data) = &input.data {
let mut info = EnumSetInfo::new(&input, attrs);
for attr in &input.attrs {
if attr.path.is_ident(&Ident::new("repr", Span::call_site())) {
let meta: Ident = attr.parse_args()?;
info.push_explicit_repr(attr.span(), meta.to_string().as_str())?;
}
}
for variant in &data.variants {
info.push_variant(variant)?;
}
info.validate()?;
Ok(enum_set_type_impl(info).into())
} else {
error(input.span(), "`#[derive(EnumSetType)]` may only be used on enums")
}
}

View file

@ -1 +1 @@
{"files":{"Cargo.toml":"dd7ab541b6d38e83665fa35fa50dc8a68867974df6085420bf45ca6f30c9816a","build.rs":"a17b1bb1bd3de3fc958f72d4d1357f7bc4432faa26640c95b5fbfccf40579d67","src/codec.rs":"ee422054b6f330d303a150223fd498dc2277c70663b0c3c0dcb7f0fc14fee7d8","src/datagram.rs":"569f8d9e34d7ee17144bf63d34136ecd9778da0d337e513f338738c50284615e","src/event.rs":"f60fee9f4b09ef47ff5e4bfa21c07e45ffd5873c292f2605f24d834070127d62","src/header.rs":"b7d4eeb40952b36f71ae1f37ce82c9617af8b84c171576de4eca9d50a3071103","src/hrtime.rs":"45a608ce9f00e2666ce95422a278c6dc0ff4e229b114e7bcf0b4c0d9dc61ad56","src/incrdecoder.rs":"91dab6f99073b1a6c88ff2f2625315dadb0b00d7bb0704e13b186155fbf496e8","src/lib.rs":"0a3679ab0bc67817097701010881e1c2f48ad1ab0700f12babc46cc59c5c788b","src/log.rs":"b69e492af85e65866cb6588138e8a337dd897d3ce399cb4e9fb8cc04ac042b7f","src/qlog.rs":"ca323c91d61810ebef2ebeb967836dda384a60a9fb492c2b8d1b235a98f2e4bf","src/timer.rs":"e63af7e7df968bf702583f263cfb63e6dca4e599bacffa2de0a6383d85333636","tests/log.rs":"480b165b7907ec642c508b303d63005eee1427115d6973a349eaf6b2242ed18d"},"package":null}
{"files":{"Cargo.toml":"9013a62945e20404cfc3624df017feeb3b86e096b6882b0fd2254c4e87d24b6b","build.rs":"a17b1bb1bd3de3fc958f72d4d1357f7bc4432faa26640c95b5fbfccf40579d67","src/codec.rs":"876fe7da558964046765aa2a2d7ebad9d53e1d4b31a1bf233d47b939f417dba1","src/datagram.rs":"569f8d9e34d7ee17144bf63d34136ecd9778da0d337e513f338738c50284615e","src/event.rs":"f60fee9f4b09ef47ff5e4bfa21c07e45ffd5873c292f2605f24d834070127d62","src/header.rs":"b7d4eeb40952b36f71ae1f37ce82c9617af8b84c171576de4eca9d50a3071103","src/hrtime.rs":"45a608ce9f00e2666ce95422a278c6dc0ff4e229b114e7bcf0b4c0d9dc61ad56","src/incrdecoder.rs":"97eb93502afabf13d46de37ca05430c49e876bfa9f013ce264231639eaf9df64","src/lib.rs":"0a3679ab0bc67817097701010881e1c2f48ad1ab0700f12babc46cc59c5c788b","src/log.rs":"b69e492af85e65866cb6588138e8a337dd897d3ce399cb4e9fb8cc04ac042b7f","src/qlog.rs":"ca323c91d61810ebef2ebeb967836dda384a60a9fb492c2b8d1b235a98f2e4bf","src/timer.rs":"e63af7e7df968bf702583f263cfb63e6dca4e599bacffa2de0a6383d85333636","tests/log.rs":"480b165b7907ec642c508b303d63005eee1427115d6973a349eaf6b2242ed18d"},"package":null}

View file

@ -1,6 +1,6 @@
[package]
name = "neqo-common"
version = "0.5.6"
version = "0.5.7"
authors = ["Bobby Holley <bobbyholley@gmail.com>"]
edition = "2018"
license = "MIT/Apache-2.0"

View file

@ -261,9 +261,7 @@ impl Encoder {
#[must_use]
pub fn from_hex(s: impl AsRef<str>) -> Self {
let s = s.as_ref();
if s.len() % 2 != 0 {
panic!("Needs to be even length");
}
assert_eq!(s.len() % 2, 0, "Needs to be even length");
let cap = s.len() / 2;
let mut enc = Self::with_capacity(cap);

View file

@ -108,8 +108,12 @@ pub struct IncrementalDecoderIgnore {
}
impl IncrementalDecoderIgnore {
/// Make a new ignoring decoder.
/// # Panics
/// If the amount to ignore is zero.
#[must_use]
pub fn new(n: usize) -> Self {
assert_ne!(n, 0);
Self { remaining: n }
}

View file

@ -1 +1 @@
{"files":{"Cargo.toml":"14de87abee81a5f7c0b422f34a9185aff27250a293a1f7fb0d8ae4ad38f919a7","TODO":"ac0f1c2ebcca03f5b3c0cc56c5aedbb030a4b511e438bc07a57361c789f91e9f","bindings/bindings.toml":"d136f82a333b0ee1499e7858fdfc3d630f7ff37501a3c51028a4eeb7e2f136b4","bindings/mozpkix.hpp":"77072c8bb0f6eb6bfe8cbadc111dcd92e0c79936d13f2e501aae1e5d289a6675","bindings/nspr_err.h":"2d5205d017b536c2d838bcf9bc4ec79f96dd50e7bb9b73892328781f1ee6629d","bindings/nspr_error.h":"e41c03c77b8c22046f8618832c9569fbcc7b26d8b9bbc35eea7168f35e346889","bindings/nspr_io.h":"085b289849ef0e77f88512a27b4d9bdc28252bd4d39c6a17303204e46ef45f72","bindings/nspr_time.h":"2e637fd338a5cf0fd3fb0070a47f474a34c2a7f4447f31b6875f5a9928d0a261","bindings/nss_ciphers.h":"95ec6344a607558b3c5ba8510f463b6295f3a2fb3f538a01410531045a5f62d1","bindings/nss_init.h":"ef49045063782fb612aff459172cc6a89340f15005808608ade5320ca9974310","bindings/nss_p11.h":"0b81e64fe6db49b2ecff94edd850be111ef99ec11220e88ceb1c67be90143a78","bindings/nss_secerr.h":"713e8368bdae5159af7893cfa517dabfe5103cede051dee9c9557c850a2defc6","bindings/nss_ssl.h":"af222fb957b989e392e762fa2125c82608a0053aff4fb97e556691646c88c335","bindings/nss_sslerr.h":"24b97f092183d8486f774cdaef5030d0249221c78343570d83a4ee5b594210ae","bindings/nss_sslopt.h":"b7807eb7abdad14db6ad7bc51048a46b065a0ea65a4508c95a12ce90e59d1eea","build.rs":"4bc26650fa85150c1a377f030e681576f92005a14f14fd8818e7ceb95c2c2a1a","src/aead.rs":"140f77ffb5016836c970c39c6c3a42db9581a14b797b9cd05386d0dd0831fe63","src/aead_fuzzing.rs":"4e60d5a2ee6dedfd08602fa36318239e731244825df2cb801ca1d88f5f2a41c1","src/agent.rs":"db620f07e2c0f441ce0a30a249a18ab699affb43d8209c63c4f1e5eae9f9f688","src/agentio.rs":"85a1f5295e98fcec0e884fb11e21cbc4c64de0f2ad5b6d78464ed9ac064fb495","src/auth.rs":"e821dac1511691151a6e64b7c7130a07d941dffad4529b2631f20ddd07d3f20c","src/cert.rs":"04d7328ab59a5268f2f48b3f880192bf28d42c09c362ef5906ee66e087c754d1","src/constants.rs":"998e77bee88197a240032c1bfbddcff417a25ba82e576a0d2fe18ee9b63cefc7","src
/ech.rs":"1b6ee298855d34310a0d65367b21fdc38678a9c37fc7e1d9579c3c8dfd753377","src/err.rs":"d4dbe63e2faba3a1f08dca015549c32550cb18907592abc3831e05e330f0a93b","src/exp.rs":"61586662407359c1ecb8ed4987bc3c702f26ba2e203a091a51b6d6363cbd510f","src/ext.rs":"361277879194dc32f741b8d1894afe5fd3fcc8eb244f7dd5914eeb959b85717d","src/hkdf.rs":"3ff432cc9d40e1dc56e9f983b54b593647c4063a5ae0f16de0a64d033ac9bd94","src/hp.rs":"46a2023c421d89fda8d09b356b648272857fd20ee5cf5829143ac88402b32e4b","src/lib.rs":"4a4aacab783b2e21d9f2f33891bc75c5a8018894c3cdf828848ccb59bf5b2d41","src/once.rs":"b9850384899a1a016e839743d3489c0d4d916e1973746ef8c89872105d7d9736","src/p11.rs":"ae054861719fdead8227220dd5a28b92882756683a436676470b672ee26b9a4e","src/prio.rs":"4224a65f25d7de9bf7d6cb18b15597a39650b3c4fcf7d184a4e4bd7f65cebccd","src/replay.rs":"c9bc0261fe1ae22e7212774c315a2669784e57762ca975a15250d4a33dbf3ea3","src/result.rs":"cef34dfcb907723e195b56501132e4560e250b327783cb5e41201da5b63e9b5c","src/secrets.rs":"48790a330994d892742048000bd12460b7eee2c3daaa444481b8527406d0a4c7","src/selfencrypt.rs":"4a9af42ccefbc77c65baedf00ef389de4fa7ed855d7ab3b60542b5931050667d","src/ssl.rs":"32e934e6dc5df4e4b4cbe96bae53921cf09a684959cb5ad3469cd65965f3164c","src/time.rs":"ddecb9f6cb6b3367852943d27fc89fd36d3c0ca0c9b5c9797494b74de2d8b5c7","tests/aead.rs":"a0fe826aa3bfcce22dbe1b06b74823cb2334331ffe6ce6152952613e9e1ccae5","tests/agent.rs":"edda258896324f0a950427fd625594bd31d772fe968a29097d6dbd76523c39c4","tests/ext.rs":"eba9f03accdd598e38292ac88263a81b367d60d5a736a43117a3663de105ec48","tests/handshake.rs":"6ea3e5b3bc889d201b55f959b658a848c0ada54c956bda087b2ac8897a24a786","tests/hkdf.rs":"47830c1ea58a02d100522bdde6fabc02bb447ccb85affa0cdc44bc25da1be32a","tests/hp.rs":"92e062538c01fa7a474225714ed238d846ceb8c8feb9d79eb05be6111b00fb1e","tests/init.rs":"fc9e392b1efa0d8efb28952f73ffc05e5348e7b2b69207b60e375c3888a252a2","tests/selfencrypt.rs":"1125c858ec4e0a6994f34d162aa066cb003c61b324f268529ea04bcb641347cb"},"package":null}
{"files":{"Cargo.toml":"561d34c14380159b08a88a2461a31355e63fbf410725dd28a12bbb3769596675","TODO":"ac0f1c2ebcca03f5b3c0cc56c5aedbb030a4b511e438bc07a57361c789f91e9f","bindings/bindings.toml":"d136f82a333b0ee1499e7858fdfc3d630f7ff37501a3c51028a4eeb7e2f136b4","bindings/mozpkix.hpp":"77072c8bb0f6eb6bfe8cbadc111dcd92e0c79936d13f2e501aae1e5d289a6675","bindings/nspr_err.h":"2d5205d017b536c2d838bcf9bc4ec79f96dd50e7bb9b73892328781f1ee6629d","bindings/nspr_error.h":"e41c03c77b8c22046f8618832c9569fbcc7b26d8b9bbc35eea7168f35e346889","bindings/nspr_io.h":"085b289849ef0e77f88512a27b4d9bdc28252bd4d39c6a17303204e46ef45f72","bindings/nspr_time.h":"2e637fd338a5cf0fd3fb0070a47f474a34c2a7f4447f31b6875f5a9928d0a261","bindings/nss_ciphers.h":"95ec6344a607558b3c5ba8510f463b6295f3a2fb3f538a01410531045a5f62d1","bindings/nss_init.h":"ef49045063782fb612aff459172cc6a89340f15005808608ade5320ca9974310","bindings/nss_p11.h":"0b81e64fe6db49b2ecff94edd850be111ef99ec11220e88ceb1c67be90143a78","bindings/nss_secerr.h":"713e8368bdae5159af7893cfa517dabfe5103cede051dee9c9557c850a2defc6","bindings/nss_ssl.h":"af222fb957b989e392e762fa2125c82608a0053aff4fb97e556691646c88c335","bindings/nss_sslerr.h":"24b97f092183d8486f774cdaef5030d0249221c78343570d83a4ee5b594210ae","bindings/nss_sslopt.h":"b7807eb7abdad14db6ad7bc51048a46b065a0ea65a4508c95a12ce90e59d1eea","build.rs":"659943cfae3c65be50e92d4b5b499c4580b4c522e9905ba567b434c2081784bc","src/aead.rs":"140f77ffb5016836c970c39c6c3a42db9581a14b797b9cd05386d0dd0831fe63","src/aead_fuzzing.rs":"4e60d5a2ee6dedfd08602fa36318239e731244825df2cb801ca1d88f5f2a41c1","src/agent.rs":"5caad7dfe81b0afbadb4c12e46634961880d12daa4820d9adcfa84c240a36ac2","src/agentio.rs":"bce4c3dfcfa433209a409ac0c0752f8c95ab37bb6239a42f99b83858e8747bd1","src/auth.rs":"e821dac1511691151a6e64b7c7130a07d941dffad4529b2631f20ddd07d3f20c","src/cert.rs":"04d7328ab59a5268f2f48b3f880192bf28d42c09c362ef5906ee66e087c754d1","src/constants.rs":"998e77bee88197a240032c1bfbddcff417a25ba82e576a0d2fe18ee9b63cefc7","src
/ech.rs":"1b6ee298855d34310a0d65367b21fdc38678a9c37fc7e1d9579c3c8dfd753377","src/err.rs":"d4dbe63e2faba3a1f08dca015549c32550cb18907592abc3831e05e330f0a93b","src/exp.rs":"61586662407359c1ecb8ed4987bc3c702f26ba2e203a091a51b6d6363cbd510f","src/ext.rs":"361277879194dc32f741b8d1894afe5fd3fcc8eb244f7dd5914eeb959b85717d","src/hkdf.rs":"3ff432cc9d40e1dc56e9f983b54b593647c4063a5ae0f16de0a64d033ac9bd94","src/hp.rs":"46a2023c421d89fda8d09b356b648272857fd20ee5cf5829143ac88402b32e4b","src/lib.rs":"db8cbe315dbfd32c187d63000b15a2fc758104b844714a96e47330bbf746be57","src/once.rs":"b9850384899a1a016e839743d3489c0d4d916e1973746ef8c89872105d7d9736","src/p11.rs":"ae054861719fdead8227220dd5a28b92882756683a436676470b672ee26b9a4e","src/prio.rs":"4224a65f25d7de9bf7d6cb18b15597a39650b3c4fcf7d184a4e4bd7f65cebccd","src/replay.rs":"c9bc0261fe1ae22e7212774c315a2669784e57762ca975a15250d4a33dbf3ea3","src/result.rs":"cef34dfcb907723e195b56501132e4560e250b327783cb5e41201da5b63e9b5c","src/secrets.rs":"48790a330994d892742048000bd12460b7eee2c3daaa444481b8527406d0a4c7","src/selfencrypt.rs":"4a9af42ccefbc77c65baedf00ef389de4fa7ed855d7ab3b60542b5931050667d","src/ssl.rs":"32e934e6dc5df4e4b4cbe96bae53921cf09a684959cb5ad3469cd65965f3164c","src/time.rs":"ddecb9f6cb6b3367852943d27fc89fd36d3c0ca0c9b5c9797494b74de2d8b5c7","tests/aead.rs":"a0fe826aa3bfcce22dbe1b06b74823cb2334331ffe6ce6152952613e9e1ccae5","tests/agent.rs":"94819f9eeba2afa0c25adc821755900f1488fd47af6d84d9507a112c29d1752a","tests/ext.rs":"eba9f03accdd598e38292ac88263a81b367d60d5a736a43117a3663de105ec48","tests/handshake.rs":"0fcfa8958686aacb42c56c51c6b234842fe990470d2069a67509869baaa18452","tests/hkdf.rs":"47830c1ea58a02d100522bdde6fabc02bb447ccb85affa0cdc44bc25da1be32a","tests/hp.rs":"92e062538c01fa7a474225714ed238d846ceb8c8feb9d79eb05be6111b00fb1e","tests/init.rs":"fc9e392b1efa0d8efb28952f73ffc05e5348e7b2b69207b60e375c3888a252a2","tests/selfencrypt.rs":"1125c858ec4e0a6994f34d162aa066cb003c61b324f268529ea04bcb641347cb"},"package":null}

View file

@ -1,6 +1,6 @@
[package]
name = "neqo-crypto"
version = "0.5.6"
version = "0.5.7"
authors = ["Martin Thomson <mt@lowentropy.net>"]
edition = "2018"
build = "build.rs"
@ -15,6 +15,7 @@ bindgen = {version = "0.56", default-features = false, features= ["runtime"]}
serde = "1.0"
serde_derive = "1.0"
toml = "0.4"
mozbuild = {version = "0.1", optional = true}
[dev-dependencies]
test-fixture = { path = "../test-fixture" }
@ -22,5 +23,5 @@ test-fixture = { path = "../test-fixture" }
[features]
default = ["deny-warnings"]
deny-warnings = []
gecko = []
gecko = ["mozbuild"]
fuzzing = []

View file

@ -315,10 +315,13 @@ fn setup_standalone() -> Vec<String> {
flags
}
#[cfg(feature = "gecko")]
fn setup_for_gecko() -> Vec<String> {
use mozbuild::TOPOBJDIR;
let mut flags: Vec<String> = Vec::new();
let fold_libs = env::var("MOZ_FOLD_LIBS").unwrap_or_default() == "1";
let fold_libs = mozbuild::config::MOZ_FOLD_LIBS;
let libs = if fold_libs {
vec!["nss3"]
} else {
@ -329,18 +332,17 @@ fn setup_for_gecko() -> Vec<String> {
println!("cargo:rustc-link-lib=dylib={}", lib);
}
if let Some(path) = env::var_os("MOZ_TOPOBJDIR").map(PathBuf::from) {
if fold_libs {
println!(
"cargo:rustc-link-search=native={}",
path.join("security").to_str().unwrap()
TOPOBJDIR.join("security").to_str().unwrap()
);
} else {
println!(
"cargo:rustc-link-search=native={}",
path.join("dist").join("bin").to_str().unwrap()
TOPOBJDIR.join("dist").join("bin").to_str().unwrap()
);
let nsslib_path = path.join("security").join("nss").join("lib");
let nsslib_path = TOPOBJDIR.join("security").join("nss").join("lib");
println!(
"cargo:rustc-link-search=native={}",
nsslib_path.join("nss").join("nss_nss3").to_str().unwrap()
@ -351,7 +353,8 @@ fn setup_for_gecko() -> Vec<String> {
);
println!(
"cargo:rustc-link-search=native={}",
path.join("config")
TOPOBJDIR
.join("config")
.join("external")
.join("nspr")
.join("pr")
@ -360,7 +363,7 @@ fn setup_for_gecko() -> Vec<String> {
);
}
let flags_path = path.join("netwerk/socket/neqo/extra-bindgen-flags");
let flags_path = TOPOBJDIR.join("netwerk/socket/neqo/extra-bindgen-flags");
println!("cargo:rerun-if-changed={}", flags_path.to_str().unwrap());
flags = fs::read_to_string(flags_path)
@ -371,19 +374,22 @@ fn setup_for_gecko() -> Vec<String> {
flags.push(String::from("-include"));
flags.push(
path.join("dist")
TOPOBJDIR
.join("dist")
.join("include")
.join("mozilla-config.h")
.to_str()
.unwrap()
.to_string(),
);
} else {
println!("cargo:warning=MOZ_TOPOBJDIR should be set by default, otherwise the build is not guaranteed to finish.");
}
flags
}
#[cfg(not(feature = "gecko"))]
fn setup_for_gecko() -> Vec<String> {
unreachable!()
}
fn main() {
let flags = if cfg!(feature = "gecko") {
setup_for_gecko()

View file

@ -277,7 +277,6 @@ pub struct SecretAgent {
now: TimeHolder,
extension_handlers: Vec<ExtensionTracker>,
inf: Option<SecretAgentInfo>,
/// The encrypted client hello (ECH) configuration that is in use.
/// Empty if ECH is not enabled.
@ -300,7 +299,6 @@ impl SecretAgent {
now: TimeHolder::default(),
extension_handlers: Vec::new(),
inf: None,
ech_config: Vec::new(),
})
@ -795,7 +793,12 @@ impl ResumptionToken {
/// A TLS Client.
#[derive(Debug)]
#[allow(clippy::box_vec)] // We need the Box.
#[allow(
renamed_and_removed_lints,
clippy::box_vec,
unknown_lints,
clippy::box_collection
)] // We need the Box.
pub struct Client {
agent: SecretAgent,
@ -983,14 +986,12 @@ impl ZeroRttChecker for AllowZeroRtt {
#[derive(Debug)]
struct ZeroRttCheckState {
fd: *mut ssl::PRFileDesc,
checker: Pin<Box<dyn ZeroRttChecker>>,
}
impl ZeroRttCheckState {
pub fn new(fd: *mut ssl::PRFileDesc, checker: Box<dyn ZeroRttChecker>) -> Self {
pub fn new(checker: Box<dyn ZeroRttChecker>) -> Self {
Self {
fd,
checker: Pin::new(checker),
}
}
@ -1085,7 +1086,7 @@ impl Server {
max_early_data: u32,
checker: Box<dyn ZeroRttChecker>,
) -> Res<()> {
let mut check_state = Box::pin(ZeroRttCheckState::new(self.agent.fd, checker));
let mut check_state = Box::pin(ZeroRttCheckState::new(checker));
unsafe {
ssl::SSL_HelloRetryRequestCallback(
self.agent.fd,

View file

@ -31,14 +31,6 @@ pub fn as_c_void<T: Unpin>(pin: &mut Pin<Box<T>>) -> *mut c_void {
(Pin::into_inner(pin.as_mut()) as *mut T).cast()
}
// This holds the length of the slice, not the slice itself.
#[derive(Default, Debug)]
struct RecordLength {
epoch: Epoch,
ct: ContentType,
len: usize,
}
/// A slice of the output.
#[derive(Default)]
pub struct Record {

View file

@ -144,8 +144,8 @@ pub fn init() {
#[cfg(debug_assertions)]
fn enable_ssl_trace() {
let opt = ssl::Opt::Locking.as_int();
let mut _v: ::std::os::raw::c_int = 0;
secstatus_to_res(unsafe { ssl::SSL_OptionGetDefault(opt, &mut _v) })
let mut v: ::std::os::raw::c_int = 0;
secstatus_to_res(unsafe { ssl::SSL_OptionGetDefault(opt, &mut v) })
.expect("SSL_OptionGetDefault failed");
}

View file

@ -421,7 +421,7 @@ fn ech_retry() {
let mut cfg = Vec::from(server.ech_config());
// Ensure that the version and config_id is correct.
assert_eq!(cfg[2], 0xfe);
assert_eq!(cfg[3], 0x0a);
assert_eq!(cfg[3], 0x0d);
assert_eq!(cfg[6], CONFIG_ID);
// Change the config_id so that the server doesn't recognize this.
cfg[6] ^= 0x94;

View file

@ -36,7 +36,7 @@ fn handshake(now: Instant, client: &mut SecretAgent, server: &mut SecretAgent) {
let mut records = a.handshake_raw(now, None).unwrap();
let is_done = |agent: &mut SecretAgent| agent.state().is_final();
while !is_done(b) {
records = if let Ok(r) = forward_records(now, &mut b, records) {
records = if let Ok(r) = forward_records(now, b, records) {
r
} else {
// TODO(mt) take the alert generated by the failed handshake

View file

@ -1 +1 @@
{"files":{"Cargo.toml":"4486b7bf92f9672abeebfe99a31ac468f2d34e26fc6e30804e65eda1a996e3b7","src/buffered_send_stream.rs":"0e4ad4914451943e49c88565661d88475ab2bbd8756e200e2a19312bafd64847","src/client_events.rs":"3dab2de2ec5fac43ff30470e0e8167fdbfede4bc467c0fcd027d809692b7465b","src/conn_params.rs":"00e3f42636f482f91cd6b86d7bebaa85a9f0407143503767fffb66a3cdfbbe43","src/connection.rs":"9c831fb668c72202a1205d12b7056254697f62590e48fd743f4313851aa34454","src/connection_client.rs":"30c6d5fa8f982237a37d19097c94a664a65174fb567210824ee6154cd3fad577","src/connection_server.rs":"1e9bca9a0a8ac4532015032b1444b23f67007b8c6b3f138900f0b142cf5625d1","src/control_stream_local.rs":"49bc3b8d2da81477fa5914d80e32657e215e84ba2c40411eb18ae170ccddecd0","src/control_stream_remote.rs":"b6c4b96e0b35d54a5fee9297945c2dc6458729af2566323b13e44a621b214d72","src/features/extended_connect/mod.rs":"95e8468e62e2e7680989b6d85397775d3a99ce68390dc68708c3fb3450932e03","src/features/extended_connect/session.rs":"5f1035082891554194c89d80338ccd0991db0085bdf8be358a2c3a7ac2b52dbc","src/features/extended_connect/webtransport.rs":"721497ea16aaf55a3be4b685afe23fdbcb2c19603042898f12d93ea4e5d8aac7","src/features/mod.rs":"a981ebbd03e7bb7ea2313e883452e44f052c48f28edb7fd53a0825911b490230","src/headers_checks.rs":"4d8ce4ba54a989ecc257a26d47111ab4583cf78a3ae6c60ad106f3ad3e77ac8f","src/hframe.rs":"79311512aafe7ac5cbda068352bbce573ee99205baa06fc75db95cc3dbbf0780","src/lib.rs":"ed19bb708b711d0b69430a632d5524e93a115239d63de094ae78611e0c2af026","src/priority.rs":"89d25666fb3b35e16c58055e51d25ff746fc420820db9f30fdecfd1ed70ac020","src/push_controller.rs":"7db0c4c1e065372d3adba90662ff20c4e36adade25f64d2168616339cc5bf47d","src/qlog.rs":"44b6cdbb1d9d6ca47b793e9dbe531b8fdbd40147375f7e4c89aeab536c5d286b","src/qpack_decoder_receiver.rs":"75008d8ea5d538ee34aca4df72e58489417604ccafb61b064280782d6754dd0d","src/qpack_encoder_receiver.rs":"f95cc7d49e4d442b93d522f14ddfc581629664d84d6e13d03a520e855bbe442d","src/recv_message.rs":"3d1556087aec
ae432589ce55459430a19c1d35c4c819ff4b2e2869a12e6a7b3d","src/request_target.rs":"9182b641f7a7b55272e0e2e872d24a35b1207f35399221b08b899857c3e873ab","src/send_message.rs":"7a676b0f6f34f7c52e98f346d7009345fec84e7db03d11dbe2a50ae95d433369","src/server.rs":"0b3775863339f8ea9d363b83f2c667bda4b62de177bb2e93caef3830ba7c46a8","src/server_connection_events.rs":"5f3eadceb0c555ab13f68ec49af0eaa7a1ebf1fdd14e1132d77583ecd76cabbb","src/server_events.rs":"42c79731c5168df35ebd3cef448d9e38e64770b31363b2b4965679582db9f61e","src/settings.rs":"8f7e3ddd807c3d5667dad93f5b902b0e00a3bcf85e41404e1e0e5dfd2c3d5dd6","src/stream_type_reader.rs":"62fb15a5b64b9777ddfcb16c3d43031797336dca39dd70340776022431b79d77","tests/httpconn.rs":"6157230714dc34a93637d7188320b0f135c400d7fd247dd48279e4db0cd0b149","tests/mod.rs":"fd6aee37243713e80fc526552f21f0222338cec9890409b6575a2a637b17ec1f","tests/priority.rs":"e3258878d4b692c5d80612c1d275972a99808168ccc33fdd966822a0c8776bb9","tests/send_message.rs":"673ae1d0bf2dce46c21ee8353f45f189d2cb64a2f6e137ae38da6b2262ad066e","tests/webtransport/mod.rs":"ef8f83789925502b5121fa6285bfcf6c09e2299a9360bf0ed2c8329ea0c25fc4","tests/webtransport/negotiation.rs":"866cec03df0643fa0f28a160a97112a4ab3c1ca337d8e00e81eb7eb42f67d41b","tests/webtransport/sessions.rs":"af6a336cac13bc4be1be329e9f18d48dd23c99975d20f719e3e59be6214aa9f4","tests/webtransport/streams.rs":"560b86ea59c92159b16099c26b6f042b5e8c0fc3f21983a00da677987670c4f9"},"package":null}
{"files":{"Cargo.toml":"9ff10a0f34e30e86b9c005058e50ba588b39a78330ece04e29e8b1e3ef060471","src/buffered_send_stream.rs":"0e4ad4914451943e49c88565661d88475ab2bbd8756e200e2a19312bafd64847","src/client_events.rs":"9d86145febad2f3fb05007eae3f5ad4834c78dd709fe388f05590405e34a614b","src/conn_params.rs":"00e3f42636f482f91cd6b86d7bebaa85a9f0407143503767fffb66a3cdfbbe43","src/connection.rs":"ef2e70c08a59a8a60c7852c8beb6545e3cb37d8da94302798bdc2fc38e4f76c0","src/connection_client.rs":"fabd645653c5991b8d2ec6f2a3c4e16e32c4faa292da47f5c5eb405ef39f8a7a","src/connection_server.rs":"de1e0359b902b1e98c923a8d5488302a68a3312b466590fdddaee6ec8327813b","src/control_stream_local.rs":"9ceb1aae8079dfca8e2f38fb555d47e84f3001d9501f2c909e8245841974f49c","src/control_stream_remote.rs":"7a261ac7df77e90a428ab0f92457a934a92a8c581462fc1818efd3de0c0ebd69","src/features/extended_connect/mod.rs":"2bc2f0570b11318f3225173001dad1a5f05e4bf60dee49a2bf9d40e3a411e138","src/features/extended_connect/webtransport_session.rs":"791734e1891bd35541be33cbf744d6edee6278e760fedb12839dd052a0cf91ba","src/features/extended_connect/webtransport_streams.rs":"784c5e317bb6af33f653ba82c1a5666b657c2a210263a415e913494f61613464","src/features/mod.rs":"a981ebbd03e7bb7ea2313e883452e44f052c48f28edb7fd53a0825911b490230","src/frames/hframe.rs":"67018ad85ecb9ec0476dafc4b25f6b7015e49531cd19f8e81d204af0e29ee3ea","src/frames/mod.rs":"258dd4bdf2daca19a62cd697d2c7f4709a35668b2b4dce3203675e814c9b40b8","src/frames/reader.rs":"0802cd8b41204bcec424fc6ed704a3bdbed0e5d38444f7a9b0550ad877b076a6","src/frames/tests/hframe.rs":"33a30bb98bb512606a06ae1752e1ed9e4588b7d3f5e9439ec83bb2e779d4ac80","src/frames/tests/mod.rs":"fd2e9d4a28c3bd2fd349f4e3844cefa37e9addb09561e9261b393ca7a37e6c6e","src/frames/tests/reader.rs":"312a3deda7b3a4bbd7afed879c94d0644fce8e34435365ef9cae1fbaa62496af","src/frames/tests/wtframe.rs":"589ebe1e62ce4da63b37b7d22cde7ba572ddbf29336fdcdbbcd0a745f79dacd8","src/frames/wtframe.rs":"1d87964fe76945bfe3e59834632ce1e3a000b5e26164b71bdc
d129f8a4e73ae3","src/headers_checks.rs":"0893d48fde97687b712e86457e75f2a1b802e7589ce38df30ff65684d8cf59c0","src/lib.rs":"4876915dd7f03021cce3a166e12e0a3763ac2c44e6ad81d223cda1f555b7a2c2","src/priority.rs":"ae0fa461031893b4f7e0d12666072e7a4da80b1e8a1c0663ab9f9e27b3242754","src/push_controller.rs":"aa2a64180d8cb1b87682d0d8bbc42167188e8e1890261cb4cabb76de1fcc708b","src/qlog.rs":"44b6cdbb1d9d6ca47b793e9dbe531b8fdbd40147375f7e4c89aeab536c5d286b","src/qpack_decoder_receiver.rs":"75008d8ea5d538ee34aca4df72e58489417604ccafb61b064280782d6754dd0d","src/qpack_encoder_receiver.rs":"f95cc7d49e4d442b93d522f14ddfc581629664d84d6e13d03a520e855bbe442d","src/recv_message.rs":"5f70fb474e387653d7982374131b3b0c08417509469f273ccebf842bfcee836f","src/request_target.rs":"9182b641f7a7b55272e0e2e872d24a35b1207f35399221b08b899857c3e873ab","src/send_message.rs":"f3503bf135af5acdb3663a8b591b1db2d160e3dcec37aafd2053f2f150f68d2a","src/server.rs":"3cde23011de0a63ee4900e41368e9319ce100a1584f90bf5463e054adcc8875c","src/server_connection_events.rs":"3d89c2d9a30ee719acfbaae4b7720cb354eb73b11bc6ceb44571d68b05192b8b","src/server_events.rs":"3081fdd1e1950aeecae031452cd683335fb0a9dcec51722e614c5939f747b9d9","src/settings.rs":"8a8919cd31683f476dec281b8b545ea3cedb0c7d60cd1e29b097bae605822d47","src/stream_type_reader.rs":"d63727341d925241ec17c7373d81145aba1464cac4c9eedfc05f24c453435f67","tests/httpconn.rs":"f8d6e6a693d17cf2eb192a730e6fc929bd2814552356ce8d4423a0e3eac8c59d","tests/mod.rs":"fd6aee37243713e80fc526552f21f0222338cec9890409b6575a2a637b17ec1f","tests/priority.rs":"a606e5fa03451e09e28c7d5f1820ee85a4567e3969a1690c979761e62123bf54","tests/send_message.rs":"673ae1d0bf2dce46c21ee8353f45f189d2cb64a2f6e137ae38da6b2262ad066e","tests/webtransport/mod.rs":"635c0b0fe682a844f4366335a40b8b3a6539abe30843ee1bcfaf87a34b1d476c","tests/webtransport/negotiation.rs":"fd46a3a77c75dfb701ac075cdb0aabb58f82b5d5c03c5a965412bbf6ad020f00","tests/webtransport/sessions.rs":"5b4d8483ac018ad5a28adad5e778e2ed48db9c441d1354f6cf21d8e
5c6f1a8b3","tests/webtransport/streams.rs":"fd5f075d93f0241290566f59f747d95530d2df579890fd0f6b9e79a557c89a67"},"package":null}

View file

@ -1,6 +1,6 @@
[package]
name = "neqo-http3"
version = "0.5.6"
version = "0.5.7"
authors = ["Dragana Damjanovic <dragana.damjano@gmail.com>"]
edition = "2018"
license = "MIT/Apache-2.0"
@ -16,6 +16,7 @@ qlog = "0.4.0"
sfv = "0.9.1"
url = "2.0"
lazy_static = "1.3.0"
enumset = "1.0.8"
[dev-dependencies]
test-fixture = { path = "../test-fixture" }

View file

@ -9,7 +9,7 @@
use crate::connection::Http3State;
use crate::settings::HSettingType;
use crate::{
features::extended_connect::{ExtendedConnectEvents, ExtendedConnectType},
features::extended_connect::{ExtendedConnectEvents, ExtendedConnectType, SessionCloseReason},
CloseType, Http3StreamInfo, HttpRecvStreamEvents, RecvStreamEvents, SendStreamEvents,
};
use neqo_common::{event::Provider as EventProvider, Header};
@ -23,10 +23,13 @@ use std::rc::Rc;
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum WebTransportEvent {
Negotiated(bool),
Session(StreamId),
Session {
stream_id: StreamId,
status: u16,
},
SessionClosed {
stream_id: StreamId,
error: Option<AppError>,
reason: SessionCloseReason,
},
NewStream {
stream_id: StreamId,
@ -174,11 +177,12 @@ impl SendStreamEvents for Http3ClientEvents {
}
impl ExtendedConnectEvents for Http3ClientEvents {
fn session_start(&self, connect_type: ExtendedConnectType, stream_id: StreamId) {
fn session_start(&self, connect_type: ExtendedConnectType, stream_id: StreamId, status: u16) {
if connect_type == ExtendedConnectType::WebTransport {
self.insert(Http3ClientEvent::WebTransport(WebTransportEvent::Session(
self.insert(Http3ClientEvent::WebTransport(WebTransportEvent::Session {
stream_id,
)));
status,
}));
} else {
unreachable!("There is only ExtendedConnectType::WebTransport.");
}
@ -188,11 +192,11 @@ impl ExtendedConnectEvents for Http3ClientEvents {
&self,
connect_type: ExtendedConnectType,
stream_id: StreamId,
error: Option<AppError>,
reason: SessionCloseReason,
) {
if connect_type == ExtendedConnectType::WebTransport {
self.insert(Http3ClientEvent::WebTransport(
WebTransportEvent::SessionClosed { stream_id, error },
WebTransportEvent::SessionClosed { stream_id, reason },
));
} else {
unreachable!("There are no other types.");
@ -325,10 +329,10 @@ impl Http3ClientEvents {
});
}
pub fn negotiation_done(&self, feature_type: HSettingType, negotiated: bool) {
pub fn negotiation_done(&self, feature_type: HSettingType, succeeded: bool) {
if feature_type == HSettingType::EnableWebTransport {
self.insert(Http3ClientEvent::WebTransport(
WebTransportEvent::Negotiated(negotiated),
WebTransportEvent::Negotiated(succeeded),
));
}
}

View file

@ -9,10 +9,11 @@
use crate::control_stream_local::ControlStreamLocal;
use crate::control_stream_remote::ControlStreamRemote;
use crate::features::extended_connect::{
webtransport::{WebTransportRecvStream, WebTransportSendStream},
ExtendedConnectEvents, ExtendedConnectFeature, ExtendedConnectSession, ExtendedConnectType,
webtransport_session::WebTransportSession,
webtransport_streams::{WebTransportRecvStream, WebTransportSendStream},
ExtendedConnectEvents, ExtendedConnectFeature, ExtendedConnectType,
};
use crate::hframe::HFrame;
use crate::frames::HFrame;
use crate::push_controller::PushController;
use crate::qpack_decoder_receiver::DecoderRecvStream;
use crate::qpack_encoder_receiver::EncoderRecvStream;
@ -241,11 +242,11 @@ impl Http3Connection {
if let Some(recv_stream) = self.recv_streams.get_mut(&stream_id) {
let res = recv_stream.receive(conn);
self.handle_stream_manipulation_output(res, stream_id, conn)
.map(|(output, _)| output)
} else {
Ok(ReceiveOutput::NoOutput)
return self
.handle_stream_manipulation_output(res, stream_id, conn)
.map(|(output, _)| output);
}
Ok(ReceiveOutput::NoOutput)
}
fn handle_unblocked_streams(
@ -260,10 +261,8 @@ impl Http3Connection {
.http_stream()
.ok_or(Error::HttpInternal(10))?
.header_unblocked(conn);
debug_assert!(matches!(
self.handle_stream_manipulation_output(res, stream_id, conn)?,
(ReceiveOutput::NoOutput, _)
));
let res = self.handle_stream_manipulation_output(res, stream_id, conn)?;
debug_assert!(matches!(res, (ReceiveOutput::NoOutput, _)));
}
}
Ok(())
@ -484,11 +483,13 @@ impl Http3Connection {
qinfo!([self], "A new http stream {}.", stream_id);
}
NewStreamType::WebTransportStream(session_id) => {
if self
.webtransport
.get_session(StreamId::from(session_id))
.is_none()
{
let session_exists = self
.send_streams
.get(&StreamId::from(session_id))
.map_or(false, |s| {
s.stream_type() == Http3StreamType::ExtendedConnect
});
if !session_exists {
conn.stream_stop_sending(stream_id, Error::HttpStreamCreation.code())?;
return Ok(ReceiveOutput::NoOutput);
}
@ -590,7 +591,6 @@ impl Http3Connection {
pub fn fetch<'b, 't, T>(
&mut self,
conn: &mut Connection,
stream_type: Http3StreamType,
send_events: Box<dyn SendStreamEvents>,
recv_events: Box<dyn HttpRecvStreamEvents>,
push_handler: Option<Rc<RefCell<PushController>>>,
@ -605,6 +605,12 @@ impl Http3Connection {
request.method,
request.target,
);
let id = self.create_bidi_transport_stream(conn)?;
self.fetch_with_stream(id, conn, send_events, recv_events, push_handler, request)?;
Ok(id)
}
fn create_bidi_transport_stream(&self, conn: &mut Connection) -> Res<StreamId> {
// Requests cannot be created when a connection is in states: Initializing, GoingAway, Closing and Closed.
match self.state() {
Http3State::GoingAway(..) | Http3State::Closing(..) | Http3State::Closed(..) => {
@ -618,13 +624,33 @@ impl Http3Connection {
.stream_create(StreamType::BiDi)
.map_err(|e| Error::map_stream_create_errors(&e))?;
conn.stream_keep_alive(id, true)?;
Ok(id)
}
fn fetch_with_stream<'b, 't, T>(
&mut self,
stream_id: StreamId,
conn: &mut Connection,
send_events: Box<dyn SendStreamEvents>,
recv_events: Box<dyn HttpRecvStreamEvents>,
push_handler: Option<Rc<RefCell<PushController>>>,
request: &RequestDescription<'b, 't, T>,
) -> Res<()>
where
T: AsRequestTarget<'t> + ?Sized + Debug,
{
let final_headers = Http3Connection::create_fetch_headers(request)?;
let stream_type = if request.connect_type.is_some() {
Http3StreamType::ExtendedConnect
} else {
Http3StreamType::Http
};
let mut send_message = SendMessage::new(
MessageType::Request,
stream_type,
id,
stream_id,
self.qpack_encoder.clone(),
send_events,
);
@ -635,13 +661,13 @@ impl Http3Connection {
.send_headers(&final_headers, conn)?;
self.add_streams(
id,
stream_id,
Box::new(send_message),
Box::new(RecvMessage::new(
&RecvMessageInfo {
message_type: MessageType::Response,
stream_type,
stream_id: id,
stream_id,
header_frame_type_read: false,
},
Rc::clone(&self.qpack_decoder),
@ -654,10 +680,10 @@ impl Http3Connection {
// Call immediately send so that at least headers get sent. This will make Firefox faster, since
// it can send request body immediatly in most cases and does not need to do a complete process loop.
self.send_streams
.get_mut(&id)
.get_mut(&stream_id)
.ok_or(Error::InvalidStreamId)?
.send(conn)?;
Ok(id)
Ok(())
}
/// Stream data are read directly into a buffer supplied as a parameter of this function to avoid copying
@ -733,7 +759,7 @@ impl Http3Connection {
error: AppError,
conn: &mut Connection,
) -> Res<()> {
qinfo!([self], "reset_:stream {} error={}.", stream_id, error);
qinfo!([self], "cancel_fetch {} error={}.", stream_id, error);
let send_stream = self.send_streams.get(&stream_id);
let recv_stream = self.recv_streams.get(&stream_id);
match (send_stream, recv_stream) {
@ -791,6 +817,8 @@ impl Http3Connection {
mem::drop(send_stream.close(conn));
if send_stream.done() {
self.remove_send_stream(stream_id, conn);
} else if send_stream.has_data_to_send() {
self.streams_with_pending_data.insert(stream_id);
}
Ok(())
}
@ -810,26 +838,32 @@ impl Http3Connection {
return Err(Error::Unavailable);
}
let extended_conn = Rc::new(RefCell::new(ExtendedConnectSession::new(
ExtendedConnectType::WebTransport,
let id = self.create_bidi_transport_stream(conn)?;
let extended_conn = Rc::new(RefCell::new(WebTransportSession::new(
id,
events,
self.role,
Rc::clone(&self.qpack_encoder),
Rc::clone(&self.qpack_decoder),
)));
let id = self.fetch(
conn,
Http3StreamType::ExtendedConnect,
self.add_streams(
id,
Box::new(extended_conn.clone()),
Box::new(extended_conn.clone()),
None,
&RequestDescription {
);
let final_headers = Http3Connection::create_fetch_headers(&RequestDescription {
method: "CONNECT",
target,
headers,
connect_type: Some(ExtendedConnectType::WebTransport),
priority: Priority::default(),
},
)?;
self.webtransport.insert(id, extended_conn);
})?;
extended_conn
.borrow_mut()
.send_request(&final_headers, conn)?;
self.streams_with_pending_data.insert(id);
Ok(id)
}
@ -867,50 +901,71 @@ impl Http3Connection {
(Some(s), Some(_r), false) => {
if s.http_stream()
.ok_or(Error::InvalidStreamId)?
.send_headers(&[Header::new(":status", "400")], conn)
.send_headers(&[Header::new(":status", "404")], conn)
.is_ok()
{
mem::drop(self.stream_close_send(conn, stream_id));
mem::drop(self.stream_stop_sending(
conn,
stream_id,
Error::HttpRequestRejected.code(),
));
// TODO issue 1294: add a timer to clean up the recv_stream if the peer does not do that in a short time.
self.streams_with_pending_data.insert(stream_id);
} else {
self.cancel_fetch(stream_id, Error::HttpRequestRejected.code(), conn)?;
}
Ok(())
}
(Some(s), Some(r), true) => {
(Some(s), Some(_r), true) => {
if s.http_stream()
.ok_or(Error::InvalidStreamId)?
.send_headers(&[Header::new(":status", "200")], conn)
.is_ok()
{
let extended_conn = Rc::new(RefCell::new(ExtendedConnectSession::new(
ExtendedConnectType::WebTransport,
let extended_conn =
Rc::new(RefCell::new(WebTransportSession::new_with_http_streams(
stream_id,
events,
self.role,
self.recv_streams.remove(&stream_id).unwrap(),
self.send_streams.remove(&stream_id).unwrap(),
)));
s.http_stream()
.unwrap()
.set_new_listener(Box::new(extended_conn.clone()));
r.http_stream()
.unwrap()
.set_new_listener(Box::new(extended_conn.clone()));
extended_conn.borrow_mut().negotiation_done(stream_id, true);
self.webtransport.insert(stream_id, extended_conn);
self.add_streams(
stream_id,
Box::new(extended_conn.clone()),
Box::new(extended_conn),
);
self.streams_with_pending_data.insert(stream_id);
} else {
self.cancel_fetch(stream_id, Error::HttpRequestRejected.code(), conn)?;
return Err(Error::InvalidStreamId);
}
Ok(())
}
}
}
/// Cleanly close a WebTransport session identified by `session_id`.
///
/// The session's CONNECT send stream is closed with the supplied application
/// `error` code and `message`. If the close completes immediately the stream
/// is removed; otherwise the stream is queued so the remaining data is
/// flushed on the next send pass.
///
/// # Errors
/// `InvalidStreamId` if `session_id` does not name a send stream, or names a
/// stream that is not an extended-connect (WebTransport) session stream.
pub(crate) fn webtransport_close_session(
    &mut self,
    conn: &mut Connection,
    session_id: StreamId,
    error: u32,
    message: &str,
) -> Res<()> {
    // Fixed log-message typo: "Clos" -> "Close".
    qtrace!("Close WebTransport session {:?}", session_id);
    let send_stream = self
        .send_streams
        .get_mut(&session_id)
        .ok_or(Error::InvalidStreamId)?;
    // Only extended-connect (WebTransport) streams may be closed this way.
    if send_stream.stream_type() != Http3StreamType::ExtendedConnect {
        return Err(Error::InvalidStreamId);
    }
    send_stream.close_with_message(conn, error, message)?;
    if send_stream.done() {
        self.remove_send_stream(session_id, conn);
    } else if send_stream.has_data_to_send() {
        // The close frame could not be fully sent yet; flush it later.
        self.streams_with_pending_data.insert(session_id);
    }
    Ok(())
}
pub fn webtransport_create_stream_local(
&mut self,
conn: &mut Connection,
@ -926,8 +981,10 @@ impl Http3Connection {
);
let wt = self
.webtransport
.get_session(session_id)
.recv_streams
.get(&session_id)
.ok_or(Error::InvalidStreamId)?
.webtransport()
.ok_or(Error::InvalidStreamId)?;
if !wt.borrow().is_active() {
return Err(Error::InvalidStreamId);
@ -962,8 +1019,10 @@ impl Http3Connection {
);
let wt = self
.webtransport
.get_session(session_id)
.recv_streams
.get(&session_id)
.ok_or(Error::InvalidStreamId)?
.webtransport()
.ok_or(Error::InvalidStreamId)?;
self.webtransport_create_stream_internal(
@ -979,7 +1038,7 @@ impl Http3Connection {
fn webtransport_create_stream_internal(
&mut self,
webtransport_session: Rc<RefCell<ExtendedConnectSession>>,
webtransport_session: Rc<RefCell<WebTransportSession>>,
stream_id: StreamId,
session_id: StreamId,
send_events: Box<dyn SendStreamEvents>,
@ -1155,10 +1214,7 @@ impl Http3Connection {
.http_stream()
.ok_or(Error::InvalidStreamId)?;
if stream
.priority_handler_mut()
.maybe_update_priority(priority)
{
if stream.maybe_update_priority(priority) {
self.control_stream_local.queue_update_priority(stream_id);
Ok(true)
} else {
@ -1205,8 +1261,17 @@ impl Http3Connection {
Ok(())
}
fn remove_extended_connect(&mut self, stream_id: StreamId, conn: &mut Connection) {
if let Some((recv, send)) = self.webtransport.remove(stream_id) {
fn remove_extended_connect(
&mut self,
wt: &Rc<RefCell<WebTransportSession>>,
conn: &mut Connection,
) {
let out = wt.borrow_mut().take_sub_streams();
if out.is_none() {
return;
}
let (recv, send) = out.unwrap();
for id in recv {
qtrace!("Remove the extended connect sub receiver stream {}", id);
// Use CloseType::ResetRemote so that an event will be sent. CloseType::LocalError would have
@ -1219,14 +1284,11 @@ impl Http3Connection {
for id in send {
qtrace!("Remove the extended connect sub send stream {}", id);
if let Some(mut s) = self.send_streams.remove(&id) {
s.handle_stop_sending(CloseType::ResetRemote(
Error::HttpRequestCancelled.code(),
));
s.handle_stop_sending(CloseType::ResetRemote(Error::HttpRequestCancelled.code()));
}
mem::drop(conn.stream_reset_send(id, Error::HttpRequestCancelled.code()));
}
}
}
fn remove_recv_stream(
&mut self,
@ -1236,7 +1298,10 @@ impl Http3Connection {
let stream = self.recv_streams.remove(&stream_id);
if let Some(ref s) = stream {
if s.stream_type() == Http3StreamType::ExtendedConnect {
self.remove_extended_connect(stream_id, conn);
self.send_streams.remove(&stream_id).unwrap();
if let Some(wt) = s.webtransport() {
self.remove_extended_connect(&wt, conn);
}
}
}
stream
@ -1250,7 +1315,9 @@ impl Http3Connection {
let stream = self.send_streams.remove(&stream_id);
if let Some(ref s) = stream {
if s.stream_type() == Http3StreamType::ExtendedConnect {
self.remove_extended_connect(stream_id, conn);
if let Some(wt) = self.recv_streams.remove(&stream_id).unwrap().webtransport() {
self.remove_extended_connect(&wt, conn);
}
}
}
stream

View file

@ -6,7 +6,7 @@
use crate::client_events::{Http3ClientEvent, Http3ClientEvents};
use crate::connection::{Http3Connection, Http3State, RequestDescription};
use crate::hframe::HFrame;
use crate::frames::HFrame;
use crate::push_controller::{PushController, RecvPushEvents};
use crate::recv_message::{RecvMessage, RecvMessageInfo};
use crate::request_target::AsRequestTarget;
@ -166,15 +166,25 @@ impl Http3Client {
self.conn.odcid().expect("Client always has odcid")
}
/// A resumption token encodes transport and settings parameter as well.
fn create_resumption_token(&mut self, token: &ResumptionToken) {
if let Some(settings) = self.base_handler.get_settings() {
fn encode_resumption_token(&self, token: &ResumptionToken) -> Option<ResumptionToken> {
self.base_handler.get_settings().map(|settings| {
let mut enc = Encoder::default();
settings.encode_frame_contents(&mut enc);
enc.encode(token.as_ref());
self.events
.resumption_token(ResumptionToken::new(enc.into(), token.expiration_time()));
ResumptionToken::new(enc.into(), token.expiration_time())
})
}
/// Retrieve a resumption token immediately, without waiting for the
/// `Http3ClientEvent::ResumptionToken` event. That event is driven by the
/// server's `NEW_TOKEN` frame, which some servers never send (or send too
/// late for short-lived connections that close before events are released),
/// so this accessor fetches whatever token the transport currently holds.
pub fn take_resumption_token(&mut self, now: Instant) -> Option<ResumptionToken> {
    // Grab the transport-level token first; bail out if there is none.
    let transport_token = self.conn.take_resumption_token(now)?;
    // Wrap it together with the HTTP/3 settings so it can be replayed later.
    self.encode_resumption_token(&transport_token)
}
/// This may be call if an application has a resumption token. This must be called before connection starts.
@ -269,7 +279,6 @@ impl Http3Client {
{
let output = self.base_handler.fetch(
&mut self.conn,
Http3StreamType::Http,
Box::new(self.events.clone()),
Box::new(self.events.clone()),
Some(Rc::clone(&self.push_handler)),
@ -289,8 +298,8 @@ impl Http3Client {
output
}
/// Send an [`PRIORITY_UPDATE`-frame][1] on next [Http3Client::process_output()]-call,
/// returns if the priority got changed
/// Send an [`PRIORITY_UPDATE`-frame][1] on next `Http3Client::process_output()` call.
/// Returns if the priority got changed.
/// # Errors
/// `InvalidStreamId` if the stream does not exist
///
@ -336,7 +345,7 @@ impl Http3Client {
/// To supply a request body this function is called (headers are supplied through the `fetch` function.)
/// # Errors
/// `InvalidStreamId` if thee stream does not exist,
/// `InvalidStreamId` if the stream does not exist,
/// `AlreadyClosed` if the stream has already been closed.
/// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if `process_output`
/// has not been called when needed, and HTTP3 layer has not picked up the info that the stream has been closed.)
@ -436,6 +445,22 @@ impl Http3Client {
output
}
/// Close a `WebTransport` session cleanly.
/// # Errors
/// `InvalidStreamId` if the stream does not exist,
/// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if `process_output`
/// has not been called when needed, and HTTP3 layer has not picked up the info that the stream has been closed.)
/// `InvalidInput` if an empty buffer has been supplied.
pub fn webtransport_close_session(
    &mut self,
    session_id: StreamId,
    error: u32,
    message: &str,
) -> Res<()> {
    // Thin wrapper: the shared connection handler owns all session state.
    let handler = &mut self.base_handler;
    handler.webtransport_close_session(&mut self.conn, session_id, error, message)
}
/// # Errors
/// This may return an error if the particular session does not exist
/// or the connection is not in the active state.
@ -588,7 +613,9 @@ impl Http3Client {
self.push_handler.borrow_mut().handle_zero_rtt_rejected();
}
ConnectionEvent::ResumptionToken(token) => {
self.create_resumption_token(&token);
if let Some(t) = self.encode_resumption_token(&token) {
self.events.resumption_token(t);
}
}
ConnectionEvent::SendStreamComplete { .. }
| ConnectionEvent::Datagram { .. }
@ -800,7 +827,7 @@ mod tests {
AuthenticationStatus, Connection, Error, HSettings, Header, Http3Client, Http3ClientEvent,
Http3Parameters, Http3State, Rc, RefCell,
};
use crate::hframe::{HFrame, H3_FRAME_TYPE_SETTINGS, H3_RESERVED_FRAME_TYPES};
use crate::frames::{HFrame, H3_FRAME_TYPE_SETTINGS, H3_RESERVED_FRAME_TYPES};
use crate::qpack_encoder_receiver::EncoderRecvStream;
use crate::settings::{HSetting, HSettingType, H3_RESERVED_SETTINGS};
use crate::{Http3Server, Priority, RecvStream};
@ -1067,7 +1094,8 @@ mod tests {
}
}
ConnectionEvent::StateChange(State::Connected) => connected = true,
ConnectionEvent::StateChange(_) => {}
ConnectionEvent::StateChange(_)
| ConnectionEvent::SendStreamCreatable { .. } => {}
_ => panic!("unexpected event"),
}
}
@ -6218,7 +6246,7 @@ mod tests {
let push_stream_id = server.conn.stream_create(StreamType::UniDi).unwrap();
let mut d = Encoder::default();
let headers1xx: &[Header] = &[Header::new(":status", "101")];
let headers1xx: &[Header] = &[Header::new(":status", "100")];
server.encode_headers(push_stream_id, headers1xx, &mut d);
let headers200: &[Header] = &[
@ -6713,4 +6741,35 @@ mod tests {
let reset_event = |e| matches!(e, Http3ClientEvent::Reset { stream_id, .. } if stream_id == request_stream_id);
assert!(client.events().any(reset_event));
}
#[test]
fn response_w_101() {
    // A 101 (Switching Protocols) interim response is not valid in HTTP/3;
    // the client must treat it as a malformed response and reset the stream.
    let (mut client, mut server, request_stream_id) = connect_and_send_request(true);
    setup_server_side_encoder(&mut client, &mut server);
    let mut d = Encoder::default();
    let headers1xx = &[Header::new(":status", "101")];
    server.encode_headers(request_stream_id, headers1xx, &mut d);
    // Send 101 response.
    server_send_response_and_exchange_packet(
        &mut client,
        &mut server,
        request_stream_id,
        &d,
        false,
    );
    // Stream has been reset because of the 101 response.
    let e = client.events().next().unwrap();
    // The reset must be local (initiated by the client) with InvalidHeader.
    assert_eq!(
        e,
        Http3ClientEvent::Reset {
            stream_id: request_stream_id,
            error: Error::InvalidHeader.code(),
            local: true,
        }
    );
}
}

View file

@ -5,7 +5,7 @@
// except according to those terms.
use crate::connection::{Http3Connection, Http3State};
use crate::hframe::HFrame;
use crate::frames::HFrame;
use crate::recv_message::{RecvMessage, RecvMessageInfo};
use crate::send_message::SendMessage;
use crate::server_connection_events::{Http3ServerConnEvent, Http3ServerConnEvents};
@ -40,6 +40,11 @@ impl Http3ServerHandler {
}
}
/// Return the current state of the underlying HTTP/3 connection.
#[must_use]
pub fn state(&self) -> Http3State {
    self.base_handler.state()
}
/// Supply a response for a request.
/// # Errors
/// `InvalidStreamId` if the stream does not exist,
@ -146,6 +151,24 @@ impl Http3ServerHandler {
)
}
/// Close a `WebTransport` session cleanly.
/// # Errors
/// `InvalidStreamId` if the stream does not exist,
/// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if `process_output`
/// has not been called when needed, and HTTP3 layer has not picked up the info that the stream has been closed.)
/// `InvalidInput` if an empty buffer has been supplied.
pub fn webtransport_close_session(
    &mut self,
    conn: &mut Connection,
    session_id: StreamId,
    error: u32,
    message: &str,
) -> Res<()> {
    // Run the close first, then mark the handler dirty so the next
    // process() pass picks up the state change whether or not the close
    // itself succeeded.
    let result = self
        .base_handler
        .webtransport_close_session(conn, session_id, error, message);
    self.needs_processing = true;
    result
}
pub fn webtransport_create_stream(
&mut self,
conn: &mut Connection,
@ -187,7 +210,7 @@ impl Http3ServerHandler {
self.needs_processing = false;
return true;
}
self.base_handler.has_data_to_send() | self.events.has_events()
self.base_handler.has_data_to_send() || self.events.has_events()
}
// This function takes the provided result and check for an error.

View file

@ -4,7 +4,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use crate::hframe::HFrame;
use crate::frames::HFrame;
use crate::{BufferedStream, Http3StreamType, RecvStream, Res};
use neqo_common::{qtrace, Encoder};
use neqo_transport::{Connection, StreamId, StreamType};
@ -74,14 +74,14 @@ impl ControlStreamLocal {
update_stream.stream_type(),
Http3StreamType::Http | Http3StreamType::Push
));
let priority_handler = update_stream.http_stream().unwrap().priority_handler_mut();
let stream = update_stream.http_stream().unwrap();
// in case multiple priority_updates were issued, ignore now irrelevant
if let Some(hframe) = priority_handler.maybe_encode_frame(update_id) {
if let Some(hframe) = stream.priority_update_frame() {
let mut enc = Encoder::new();
hframe.encode(&mut enc);
if self.stream.send_atomic(conn, &enc)? {
priority_handler.priority_update_sent();
stream.priority_update_sent();
} else {
self.outstanding_priority_update.push_front(update_id);
break;

View file

@ -4,7 +4,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use crate::hframe::{HFrame, HFrameReader};
use crate::frames::{FrameReader, HFrame, StreamReaderConnectionWrapper};
use crate::{CloseType, Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream};
use neqo_common::qdebug;
use neqo_transport::{Connection, StreamId};
@ -13,7 +13,7 @@ use neqo_transport::{Connection, StreamId};
#[derive(Debug)]
pub(crate) struct ControlStreamRemote {
stream_id: StreamId,
frame_reader: HFrameReader,
frame_reader: FrameReader,
}
impl ::std::fmt::Display for ControlStreamRemote {
@ -26,16 +26,24 @@ impl ControlStreamRemote {
pub fn new(stream_id: StreamId) -> Self {
Self {
stream_id,
frame_reader: HFrameReader::new(),
frame_reader: FrameReader::new(),
}
}
/// Check if a stream is the control stream and read received data.
pub fn receive_single(&mut self, conn: &mut Connection) -> Res<Option<HFrame>> {
qdebug!([self], "Receiving data.");
match self.frame_reader.receive(conn, self.stream_id)? {
match self
.frame_reader
.receive(&mut StreamReaderConnectionWrapper::new(
conn,
self.stream_id,
))? {
(_, true) => Err(Error::HttpClosedCriticalStream),
(s, false) => Ok(s),
(s, false) => {
qdebug!([self], "received {:?}", s);
Ok(s)
}
}
}
}

View file

@ -6,28 +6,45 @@
#![allow(clippy::module_name_repetitions)]
pub mod session;
pub mod webtransport;
pub mod webtransport_session;
pub mod webtransport_streams;
use crate::client_events::Http3ClientEvents;
use crate::features::NegotiationState;
use crate::settings::{HSettingType, HSettings};
use crate::{Http3StreamInfo, Http3StreamType};
use crate::{CloseType, Http3StreamInfo, Http3StreamType};
use neqo_transport::{AppError, StreamId};
pub use session::ExtendedConnectSession;
use std::cell::RefCell;
use std::collections::BTreeSet;
use std::collections::HashMap;
use std::fmt::Debug;
use std::rc::Rc;
pub use webtransport_session::WebTransportSession;
/// Why an extended-connect (WebTransport) session ended.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum SessionCloseReason {
    // Closed because of a stream reset or local error, carrying the
    // application error code.
    Error(AppError),
    // The CONNECT request was answered with a non-2xx status.
    Status(u16),
    // A clean shutdown with an application-supplied error code and message.
    Clean { error: u32, message: String },
}
impl From<CloseType> for SessionCloseReason {
fn from(close_type: CloseType) -> SessionCloseReason {
match close_type {
CloseType::ResetApp(e) | CloseType::ResetRemote(e) | CloseType::LocalError(e) => {
SessionCloseReason::Error(e)
}
CloseType::Done => SessionCloseReason::Clean {
error: 0,
message: "".to_string(),
},
}
}
}
pub trait ExtendedConnectEvents: Debug {
fn session_start(&self, connect_type: ExtendedConnectType, stream_id: StreamId);
fn session_start(&self, connect_type: ExtendedConnectType, stream_id: StreamId, status: u16);
fn session_end(
&self,
connect_type: ExtendedConnectType,
stream_id: StreamId,
error: Option<AppError>,
reason: SessionCloseReason,
);
fn extended_connect_new_stream(&self, stream_info: Http3StreamInfo);
}
@ -66,9 +83,7 @@ impl From<ExtendedConnectType> for HSettingType {
#[derive(Debug)]
pub struct ExtendedConnectFeature {
connect_type: ExtendedConnectType,
feature_negotiation: NegotiationState,
sessions: HashMap<StreamId, Rc<RefCell<ExtendedConnectSession>>>,
}
impl ExtendedConnectFeature {
@ -76,8 +91,6 @@ impl ExtendedConnectFeature {
pub fn new(connect_type: ExtendedConnectType, enable: bool) -> Self {
Self {
feature_negotiation: NegotiationState::new(enable, HSettingType::from(connect_type)),
connect_type,
sessions: HashMap::new(),
}
}
@ -85,21 +98,6 @@ impl ExtendedConnectFeature {
self.feature_negotiation.set_listener(new_listener);
}
pub fn insert(&mut self, stream_id: StreamId, session: Rc<RefCell<ExtendedConnectSession>>) {
self.sessions.insert(stream_id, session);
}
pub fn get_session(
&mut self,
stream_id: StreamId,
) -> Option<Rc<RefCell<ExtendedConnectSession>>> {
if !matches!(self.feature_negotiation, NegotiationState::Negotiated) {
return None;
}
self.sessions.get_mut(&stream_id).cloned()
}
pub fn handle_settings(&mut self, settings: &HSettings) {
self.feature_negotiation.handle_settings(settings);
}
@ -108,13 +106,4 @@ impl ExtendedConnectFeature {
pub fn enabled(&self) -> bool {
self.feature_negotiation.enabled()
}
pub fn remove(
&mut self,
stream_id: StreamId,
) -> Option<(BTreeSet<StreamId>, BTreeSet<StreamId>)> {
self.sessions
.remove(&stream_id)
.and_then(|ec| ec.borrow_mut().take_sub_streams())
}
}

View file

@ -1,179 +0,0 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(clippy::module_name_repetitions)]
use super::{ExtendedConnectEvents, ExtendedConnectType};
use crate::{
CloseType, Error, Http3StreamInfo, HttpRecvStreamEvents, RecvStreamEvents, SendStreamEvents,
};
use neqo_common::{qtrace, Header, Role};
use neqo_transport::StreamId;
use std::cell::RefCell;
use std::collections::BTreeSet;
use std::mem;
use std::rc::Rc;
// Negotiation lifecycle of an extended-connect session.
#[derive(Debug, PartialEq)]
enum SessionState {
    Negotiating,
    Active(StreamId),
    Done,
}

// State shared between the HTTP/3 connection and an extended-connect
// (e.g. WebTransport) session: the session's sub-streams and the event sink.
#[derive(Debug)]
pub struct ExtendedConnectSession {
    connect_type: ExtendedConnectType,
    state: SessionState,
    events: Box<dyn ExtendedConnectEvents>,
    send_streams: BTreeSet<StreamId>,
    recv_streams: BTreeSet<StreamId>,
    role: Role,
}

impl ::std::fmt::Display for ExtendedConnectSession {
    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
        // NOTE(review): "Sesssion" is a typo, but this is a runtime string
        // (log output), so it is left untouched here.
        write!(
            f,
            "ExtendedConnectSesssion for {}",
            self.connect_type.string(),
        )
    }
}

impl ExtendedConnectSession {
    /// Create a session that still has to negotiate (send the CONNECT
    /// request and receive a 200 response).
    #[must_use]
    pub fn new(
        connect_type: ExtendedConnectType,
        events: Box<dyn ExtendedConnectEvents>,
        role: Role,
    ) -> Self {
        Self {
            connect_type,
            state: SessionState::Negotiating,
            events,
            send_streams: BTreeSet::new(),
            recv_streams: BTreeSet::new(),
            role,
        }
    }

    // Transition to Done and emit a session_end event, unless the close was
    // initiated by the application itself (ResetApp), which needs no event.
    fn close(&mut self, stream_id: StreamId, close_type: CloseType) {
        if self.state == SessionState::Done {
            return;
        }
        qtrace!("ExtendedConnect close the session");
        self.state = SessionState::Done;
        if let CloseType::ResetApp(_) = close_type {
            return;
        }
        self.events
            .session_end(self.connect_type, stream_id, close_type.error());
    }

    /// Record the outcome of the CONNECT negotiation and emit the
    /// corresponding session_start/session_end event.
    pub fn negotiation_done(&mut self, stream_id: StreamId, succeeded: bool) {
        if self.state == SessionState::Done {
            return;
        }
        self.state = if succeeded {
            self.events.session_start(self.connect_type, stream_id);
            SessionState::Active(stream_id)
        } else {
            self.events.session_end(self.connect_type, stream_id, None);
            SessionState::Done
        };
    }

    /// Register a sub-stream with the session. Bidirectional streams are
    /// both send and receive; unidirectional streams are send if we opened
    /// them, receive otherwise. Peer-initiated streams raise an event.
    pub fn add_stream(&mut self, stream_id: StreamId) {
        if let SessionState::Active(session_id) = self.state {
            if stream_id.is_bidi() {
                self.send_streams.insert(stream_id);
                self.recv_streams.insert(stream_id);
            } else if stream_id.is_self_initiated(self.role) {
                self.send_streams.insert(stream_id);
            } else {
                self.recv_streams.insert(stream_id);
            }
            if !stream_id.is_self_initiated(self.role) {
                self.events
                    .extended_connect_new_stream(Http3StreamInfo::new(
                        stream_id,
                        self.connect_type.get_stream_type(session_id),
                    ));
            }
        }
    }

    /// Forget a receive sub-stream.
    pub fn remove_recv_stream(&mut self, stream_id: StreamId) {
        self.recv_streams.remove(&stream_id);
    }

    /// Forget a send sub-stream.
    pub fn remove_send_stream(&mut self, stream_id: StreamId) {
        self.send_streams.remove(&stream_id);
    }

    /// Whether the negotiation succeeded and the session is usable.
    #[must_use]
    pub fn is_active(&self) -> bool {
        matches!(self.state, SessionState::Active(_))
    }

    /// Take ownership of all sub-streams (receive set first, then send set),
    /// leaving the session's sets empty.
    pub fn take_sub_streams(&mut self) -> Option<(BTreeSet<StreamId>, BTreeSet<StreamId>)> {
        Some((
            mem::take(&mut self.recv_streams),
            mem::take(&mut self.send_streams),
        ))
    }
}

impl RecvStreamEvents for Rc<RefCell<ExtendedConnectSession>> {
    fn data_readable(&self, stream_info: Http3StreamInfo) {
        // A session request is not expected to receive any data. This may change in
        // the future.
        self.borrow_mut().close(
            stream_info.stream_id(),
            CloseType::LocalError(Error::HttpGeneralProtocolStream.code()),
        );
    }

    fn recv_closed(&self, stream_info: Http3StreamInfo, close_type: CloseType) {
        self.borrow_mut().close(stream_info.stream_id(), close_type);
    }
}

impl HttpRecvStreamEvents for Rc<RefCell<ExtendedConnectSession>> {
    // Negotiation succeeds only on an exact ":status" == "200" header.
    fn header_ready(
        &self,
        stream_info: Http3StreamInfo,
        headers: Vec<Header>,
        _interim: bool,
        _fin: bool,
    ) {
        qtrace!("ExtendedConnect response headers {:?}", headers);
        self.borrow_mut().negotiation_done(
            stream_info.stream_id(),
            headers
                .iter()
                .find_map(|h| {
                    if h.name() == ":status" && h.value() == "200" {
                        Some(())
                    } else {
                        None
                    }
                })
                .is_some(),
        );
    }
}

impl SendStreamEvents for Rc<RefCell<ExtendedConnectSession>> {
    fn data_writable(&self, _stream_info: Http3StreamInfo) {}

    /// Add a new `StopSending` event
    fn send_closed(&self, stream_info: Http3StreamInfo, close_type: CloseType) {
        self.borrow_mut().close(stream_info.stream_id(), close_type);
    }
}

View file

@ -0,0 +1,505 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(clippy::module_name_repetitions)]
use super::{ExtendedConnectEvents, ExtendedConnectType, SessionCloseReason};
use crate::{
frames::{FrameReader, StreamReaderRecvStreamWrapper, WebTransportFrame},
recv_message::{RecvMessage, RecvMessageInfo},
send_message::SendMessage,
CloseType, Error, HFrame, Http3StreamInfo, Http3StreamType, HttpRecvStream,
HttpRecvStreamEvents, Priority, PriorityHandler, ReceiveOutput, RecvStream, RecvStreamEvents,
Res, SendStream, SendStreamEvents, Stream,
};
use neqo_common::{qtrace, Encoder, Header, MessageType, Role};
use neqo_qpack::{QPackDecoder, QPackEncoder};
use neqo_transport::{Connection, StreamId};
use std::any::Any;
use std::cell::RefCell;
use std::collections::BTreeSet;
use std::mem;
use std::rc::Rc;
/// Lifecycle of a WebTransport session's CONNECT exchange.
#[derive(Debug, PartialEq)]
enum SessionState {
    Negotiating,
    Active,
    FinPending,
    Done,
}

impl SessionState {
    /// True once the session has begun shutting down: a FIN is still
    /// pending or the session is fully done.
    pub fn closing_state(&self) -> bool {
        match self {
            Self::FinPending | Self::Done => true,
            Self::Negotiating | Self::Active => false,
        }
    }
}
/// A WebTransport session running over an extended CONNECT request.
/// Owns the session's control (CONNECT) streams and tracks its sub-streams.
#[derive(Debug)]
pub struct WebTransportSession {
    // Receive half of the CONNECT control stream.
    control_stream_recv: Box<dyn RecvStream>,
    // Send half of the CONNECT control stream.
    control_stream_send: Box<dyn SendStream>,
    // Listener shared with both control streams; buffers header events.
    stream_event_listener: Rc<RefCell<WebTransportSessionListener>>,
    session_id: StreamId,
    state: SessionState,
    // Reads WebTransport frames (e.g. CLOSE_SESSION) off the control stream.
    frame_reader: FrameReader,
    events: Box<dyn ExtendedConnectEvents>,
    // Sub-streams belonging to this session.
    send_streams: BTreeSet<StreamId>,
    recv_streams: BTreeSet<StreamId>,
    role: Role,
}
// Log-friendly identification of a session by its CONNECT stream id.
impl ::std::fmt::Display for WebTransportSession {
    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
        write!(f, "WebTransportSession session={}", self.session_id,)
    }
}
impl WebTransportSession {
/// Create a session that still has to negotiate: the CONNECT request will be
/// sent on `session_id` and the response awaited (state starts at
/// `Negotiating`). Both control-stream halves share one event listener.
#[must_use]
pub fn new(
    session_id: StreamId,
    events: Box<dyn ExtendedConnectEvents>,
    role: Role,
    qpack_encoder: Rc<RefCell<QPackEncoder>>,
    qpack_decoder: Rc<RefCell<QPackDecoder>>,
) -> Self {
    let stream_event_listener = Rc::new(RefCell::new(WebTransportSessionListener::default()));
    Self {
        // The receive half parses the CONNECT response headers.
        control_stream_recv: Box::new(RecvMessage::new(
            &RecvMessageInfo {
                message_type: MessageType::Response,
                stream_type: Http3StreamType::ExtendedConnect,
                stream_id: session_id,
                header_frame_type_read: false,
            },
            qpack_decoder,
            Box::new(stream_event_listener.clone()),
            None,
            PriorityHandler::new(false, Priority::default()),
        )),
        // The send half carries the CONNECT request.
        control_stream_send: Box::new(SendMessage::new(
            MessageType::Request,
            Http3StreamType::ExtendedConnect,
            session_id,
            qpack_encoder,
            Box::new(stream_event_listener.clone()),
        )),
        stream_event_listener,
        session_id,
        state: SessionState::Negotiating,
        frame_reader: FrameReader::new(),
        events,
        send_streams: BTreeSet::new(),
        recv_streams: BTreeSet::new(),
        role,
    }
}
/// Create a session from already-negotiated HTTP streams (the CONNECT
/// exchange has succeeded), so the state starts at `Active`. The session's
/// listener is attached to both supplied streams.
/// # Panics
/// This function is only called with `RecvStream` and `SendStream` that also implement
/// the http specific functions and `http_stream()` will never return `None`.
#[must_use]
pub fn new_with_http_streams(
    session_id: StreamId,
    events: Box<dyn ExtendedConnectEvents>,
    role: Role,
    mut control_stream_recv: Box<dyn RecvStream>,
    mut control_stream_send: Box<dyn SendStream>,
) -> Self {
    let stream_event_listener = Rc::new(RefCell::new(WebTransportSessionListener::default()));
    // Re-point both streams' event callbacks at this session's listener.
    control_stream_recv
        .http_stream()
        .unwrap()
        .set_new_listener(Box::new(stream_event_listener.clone()));
    control_stream_send
        .http_stream()
        .unwrap()
        .set_new_listener(Box::new(stream_event_listener.clone()));
    Self {
        control_stream_recv,
        control_stream_send,
        stream_event_listener,
        session_id,
        state: SessionState::Active,
        frame_reader: FrameReader::new(),
        events,
        send_streams: BTreeSet::new(),
        recv_streams: BTreeSet::new(),
        role,
    }
}
/// Send the CONNECT request headers on the control stream.
/// # Errors
/// The function can only fail if supplied headers are not valid http headers.
/// # Panics
/// `control_stream_send` implements the http specific functions and `http_stream()`
/// will never return `None`.
pub fn send_request(&mut self, headers: &[Header], conn: &mut Connection) -> Res<()> {
    self.control_stream_send
        .http_stream()
        .unwrap()
        .send_headers(headers, conn)
}

// Drive the control stream: read incoming data, then (re)check the CONNECT
// response headers and parse any WebTransport frames. The returned bool is
// true once the session is fully done.
fn receive(&mut self, conn: &mut Connection) -> Res<(ReceiveOutput, bool)> {
    qtrace!([self], "receive control data");
    let (out, _) = self.control_stream_recv.receive(conn)?;
    debug_assert!(out == ReceiveOutput::NoOutput);
    self.maybe_check_headers();
    self.read_control_stream(conn)?;
    Ok((ReceiveOutput::NoOutput, self.state == SessionState::Done))
}

// Same as receive(), but entered when a previously QPACK-blocked header
// block becomes decodable.
fn header_unblocked(&mut self, conn: &mut Connection) -> Res<(ReceiveOutput, bool)> {
    let (out, _) = self
        .control_stream_recv
        .http_stream()
        .unwrap()
        .header_unblocked(conn)?;
    debug_assert!(out == ReceiveOutput::NoOutput);
    self.maybe_check_headers();
    self.read_control_stream(conn)?;
    Ok((ReceiveOutput::NoOutput, self.state == SessionState::Done))
}
// Priority handling is delegated to the HTTP receive half of the control
// stream, which owns the PriorityHandler.
fn maybe_update_priority(&mut self, priority: Priority) -> bool {
    self.control_stream_recv
        .http_stream()
        .unwrap()
        .maybe_update_priority(priority)
}

// A pending PRIORITY_UPDATE frame for this session, if any.
fn priority_update_frame(&mut self) -> Option<HFrame> {
    self.control_stream_recv
        .http_stream()
        .unwrap()
        .priority_update_frame()
}

// Mark the pending PRIORITY_UPDATE as sent.
fn priority_update_sent(&mut self) {
    self.control_stream_recv
        .http_stream()
        .unwrap()
        .priority_update_sent();
}

// Flush buffered control-stream data; the session is done once the send
// side reports completion.
fn send(&mut self, conn: &mut Connection) -> Res<()> {
    self.control_stream_send.send(conn)?;
    if self.control_stream_send.done() {
        self.state = SessionState::Done;
    }
    Ok(())
}

fn has_data_to_send(&self) -> bool {
    self.control_stream_send.has_data_to_send()
}

fn done(&self) -> bool {
    self.state == SessionState::Done
}
// Move to Done and emit a session_end event, unless the close was initiated
// by the application itself (ResetApp), which needs no event. A session that
// is already closing is left alone.
fn close(&mut self, close_type: CloseType) {
    if self.state.closing_state() {
        return;
    }
    qtrace!("ExtendedConnect close the session");
    self.state = SessionState::Done;
    if let CloseType::ResetApp(_) = close_type {
        return;
    }
    self.events.session_end(
        ExtendedConnectType::WebTransport,
        self.session_id,
        SessionCloseReason::from(close_type),
    );
}
/// Evaluate the CONNECT response headers once they are available and drive
/// the state machine out of `Negotiating`:
/// - interim (1xx) response: ignored unless the stream ended, which closes
///   the session cleanly;
/// - 2xx: session becomes `Active` (or ends cleanly if fin arrived with it);
/// - anything else: session ends with `SessionCloseReason::Status`.
/// # Panics
/// This cannot panic because headers are checked before this function called.
pub fn maybe_check_headers(&mut self) {
    if SessionState::Negotiating != self.state {
        return;
    }
    if let Some((headers, interim, fin)) = self.stream_event_listener.borrow_mut().get_headers()
    {
        qtrace!(
            "ExtendedConnect response headers {:?}, fin={}",
            headers,
            fin
        );
        if interim {
            if fin {
                // Stream ended on an interim response: a clean end without
                // the session ever becoming active.
                self.events.session_end(
                    ExtendedConnectType::WebTransport,
                    self.session_id,
                    SessionCloseReason::Clean {
                        error: 0,
                        message: "".to_string(),
                    },
                );
                self.state = SessionState::Done;
            }
        } else {
            // A final response always carries a parseable :status (checked
            // upstream, hence the unwrap).
            let status = headers
                .iter()
                .find_map(|h| {
                    if h.name() == ":status" {
                        h.value().parse::<u16>().ok()
                    } else {
                        None
                    }
                })
                .unwrap();
            self.state = if (200..300).contains(&status) {
                if fin {
                    self.events.session_end(
                        ExtendedConnectType::WebTransport,
                        self.session_id,
                        SessionCloseReason::Clean {
                            error: 0,
                            message: "".to_string(),
                        },
                    );
                    SessionState::Done
                } else {
                    self.events.session_start(
                        ExtendedConnectType::WebTransport,
                        self.session_id,
                        status,
                    );
                    SessionState::Active
                }
            } else {
                self.events.session_end(
                    ExtendedConnectType::WebTransport,
                    self.session_id,
                    SessionCloseReason::Status(status),
                );
                SessionState::Done
            };
        }
    }
}
/// Register a sub-stream with an active session. Bidirectional streams are
/// tracked on both sides; unidirectional streams are send-only if we opened
/// them, receive-only otherwise. A peer-initiated stream raises a
/// new-stream event.
pub fn add_stream(&mut self, stream_id: StreamId) {
    if let SessionState::Active = self.state {
        if stream_id.is_bidi() {
            self.send_streams.insert(stream_id);
            self.recv_streams.insert(stream_id);
        } else if stream_id.is_self_initiated(self.role) {
            self.send_streams.insert(stream_id);
        } else {
            self.recv_streams.insert(stream_id);
        }
        if !stream_id.is_self_initiated(self.role) {
            self.events
                .extended_connect_new_stream(Http3StreamInfo::new(
                    stream_id,
                    ExtendedConnectType::WebTransport.get_stream_type(self.session_id),
                ));
        }
    }
}

/// Forget a receive sub-stream.
pub fn remove_recv_stream(&mut self, stream_id: StreamId) {
    self.recv_streams.remove(&stream_id);
}

/// Forget a send sub-stream.
pub fn remove_send_stream(&mut self, stream_id: StreamId) {
    self.send_streams.remove(&stream_id);
}

/// Whether the session has completed negotiation and is usable.
#[must_use]
pub fn is_active(&self) -> bool {
    matches!(self.state, SessionState::Active)
}

/// Take ownership of all sub-streams (receive set first, then send set),
/// leaving the session's own sets empty.
pub fn take_sub_streams(&mut self) -> Option<(BTreeSet<StreamId>, BTreeSet<StreamId>)> {
    Some((
        mem::take(&mut self.recv_streams),
        mem::take(&mut self.send_streams),
    ))
}
/// Parse WebTransport frames from the control stream. A CLOSE_SESSION frame
/// ends the session cleanly with the peer's error/message; a bare FIN ends
/// it cleanly with error 0. If the CLOSE_SESSION arrives without fin, the
/// state stays `FinPending` until the stream actually finishes.
/// # Errors
/// It may return an error if the frame is not correctly decoded.
pub fn read_control_stream(&mut self, conn: &mut Connection) -> Res<()> {
    let (f, fin) = self
        .frame_reader
        .receive::<WebTransportFrame>(&mut StreamReaderRecvStreamWrapper::new(
            conn,
            &mut self.control_stream_recv,
        ))
        // Any decoding failure on the control stream is a protocol error.
        .map_err(|_| Error::HttpGeneralProtocolStream)?;
    qtrace!([self], "Received frame: {:?} fin={}", f, fin);
    if let Some(WebTransportFrame::CloseSession { error, message }) = f {
        self.events.session_end(
            ExtendedConnectType::WebTransport,
            self.session_id,
            SessionCloseReason::Clean { error, message },
        );
        self.state = if fin {
            SessionState::Done
        } else {
            SessionState::FinPending
        };
    } else if fin {
        self.events.session_end(
            ExtendedConnectType::WebTransport,
            self.session_id,
            SessionCloseReason::Clean {
                error: 0,
                message: "".to_string(),
            },
        );
        self.state = SessionState::Done;
    }
    Ok(())
}
/// Close the session by sending a `CloseSession` frame with `error` and
/// `message` on the control stream.
/// # Errors
/// Return an error if the stream was closed on the transport layer, but that information is not yet
/// consumed on the http/3 layer.
pub fn close_session(&mut self, conn: &mut Connection, error: u32, message: &str) -> Res<()> {
    // Mark the session Done up front so it stays closed even if sending
    // the frame below fails with an early return.
    self.state = SessionState::Done;
    let close_frame = WebTransportFrame::CloseSession {
        error,
        message: message.to_string(),
    };
    let mut encoder = Encoder::default();
    close_frame.encode(&mut encoder);
    // The frame is queued all-or-nothing (per `send_data_atomic`).
    self.control_stream_send.send_data_atomic(conn, &encoder)?;
    self.control_stream_send.close(conn)?;
    // If the fin could not be sent yet, stay in FinPending until the
    // send side drains.
    self.state = if self.control_stream_send.done() {
        SessionState::Done
    } else {
        SessionState::FinPending
    };
    Ok(())
}
// Forward application data to the session's control send stream.
fn send_data(&mut self, conn: &mut Connection, buf: &[u8]) -> Res<usize> {
    self.control_stream_send.send_data(conn, buf)
}
}
// A shared session handle is identified as an extended-CONNECT stream.
impl Stream for Rc<RefCell<WebTransportSession>> {
    fn stream_type(&self) -> Http3StreamType {
        Http3StreamType::ExtendedConnect
    }
}
// Delegate the `RecvStream` interface to the shared session object.
impl RecvStream for Rc<RefCell<WebTransportSession>> {
    fn receive(&mut self, conn: &mut Connection) -> Res<(ReceiveOutput, bool)> {
        self.borrow_mut().receive(conn)
    }
    // A transport reset closes the whole session.
    fn reset(&mut self, close_type: CloseType) -> Res<()> {
        self.borrow_mut().close(close_type);
        Ok(())
    }
    fn http_stream(&mut self) -> Option<&mut dyn HttpRecvStream> {
        Some(self)
    }
    // Expose this handle (cheap Rc clone) as a WebTransport session.
    fn webtransport(&self) -> Option<Rc<RefCell<WebTransportSession>>> {
        Some(self.clone())
    }
}
// Delegate the `HttpRecvStream` interface to the shared session object.
impl HttpRecvStream for Rc<RefCell<WebTransportSession>> {
    fn header_unblocked(&mut self, conn: &mut Connection) -> Res<(ReceiveOutput, bool)> {
        self.borrow_mut().header_unblocked(conn)
    }
    fn maybe_update_priority(&mut self, priority: Priority) -> bool {
        self.borrow_mut().maybe_update_priority(priority)
    }
    fn priority_update_frame(&mut self) -> Option<HFrame> {
        self.borrow_mut().priority_update_frame()
    }
    fn priority_update_sent(&mut self) {
        self.borrow_mut().priority_update_sent();
    }
    fn any(&self) -> &dyn Any {
        self
    }
}
// Delegate the `SendStream` interface to the shared session object.
impl SendStream for Rc<RefCell<WebTransportSession>> {
    fn send(&mut self, conn: &mut Connection) -> Res<()> {
        self.borrow_mut().send(conn)
    }
    fn send_data(&mut self, conn: &mut Connection, buf: &[u8]) -> Res<usize> {
        self.borrow_mut().send_data(conn, buf)
    }
    fn has_data_to_send(&self) -> bool {
        self.borrow_mut().has_data_to_send()
    }
    fn stream_writable(&self) {}
    fn done(&self) -> bool {
        self.borrow_mut().done()
    }
    // Closing the send side closes the whole session cleanly (code 0).
    fn close(&mut self, conn: &mut Connection) -> Res<()> {
        self.borrow_mut().close_session(conn, 0, "")
    }
    fn close_with_message(&mut self, conn: &mut Connection, error: u32, message: &str) -> Res<()> {
        self.borrow_mut().close_session(conn, error, message)
    }
    // STOP_SENDING from the peer also closes the session.
    fn handle_stop_sending(&mut self, close_type: CloseType) {
        self.borrow_mut().close(close_type);
    }
}
/// Collects the response headers of the extended-CONNECT request so the
/// session can process them later.
#[derive(Debug, Default)]
struct WebTransportSessionListener {
    // Most recently received `(headers, interim, fin)`, if unconsumed.
    headers: Option<(Vec<Header>, bool, bool)>,
}
impl WebTransportSessionListener {
fn set_headers(&mut self, headers: Vec<Header>, interim: bool, fin: bool) {
self.headers = Some((headers, interim, fin));
}
pub fn get_headers(&mut self) -> Option<(Vec<Header>, bool, bool)> {
mem::take(&mut self.headers)
}
}
impl RecvStreamEvents for Rc<RefCell<WebTransportSessionListener>> {}
impl HttpRecvStreamEvents for Rc<RefCell<WebTransportSessionListener>> {
    /// Store received response headers. An interim (informational)
    /// response is only recorded when the stream also ended.
    fn header_ready(
        &self,
        _stream_info: Http3StreamInfo,
        headers: Vec<Header>,
        interim: bool,
        fin: bool,
    ) {
        // An interim response without fin carries nothing to act on yet.
        if interim && !fin {
            return;
        }
        self.borrow_mut().set_headers(headers, interim, fin);
    }
}
impl SendStreamEvents for Rc<RefCell<WebTransportSessionListener>> {}

View file

@ -4,7 +4,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::ExtendedConnectSession;
use super::WebTransportSession;
use crate::{
CloseType, Http3StreamInfo, Http3StreamType, ReceiveOutput, RecvStream, RecvStreamEvents, Res,
SendStream, SendStreamEvents, Stream,
@ -21,7 +21,7 @@ pub const WEBTRANSPORT_STREAM: u64 = 0x41;
pub struct WebTransportRecvStream {
stream_id: StreamId,
events: Box<dyn RecvStreamEvents>,
session: Rc<RefCell<ExtendedConnectSession>>,
session: Rc<RefCell<WebTransportSession>>,
session_id: StreamId,
fin: bool,
}
@ -31,7 +31,7 @@ impl WebTransportRecvStream {
stream_id: StreamId,
session_id: StreamId,
events: Box<dyn RecvStreamEvents>,
session: Rc<RefCell<ExtendedConnectSession>>,
session: Rc<RefCell<WebTransportSession>>,
) -> Self {
Self {
stream_id,
@ -89,7 +89,7 @@ pub struct WebTransportSendStream {
stream_id: StreamId,
state: WebTransportSenderStreamState,
events: Box<dyn SendStreamEvents>,
session: Rc<RefCell<ExtendedConnectSession>>,
session: Rc<RefCell<WebTransportSession>>,
session_id: StreamId,
}
@ -98,7 +98,7 @@ impl WebTransportSendStream {
stream_id: StreamId,
session_id: StreamId,
events: Box<dyn SendStreamEvents>,
session: Rc<RefCell<ExtendedConnectSession>>,
session: Rc<RefCell<WebTransportSession>>,
local: bool,
) -> Self {
Self {

View file

@ -0,0 +1,226 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use crate::{frames::reader::FrameDecoder, settings::HSettings, Error, Priority, Res};
use neqo_common::{Decoder, Encoder};
use neqo_crypto::random;
use neqo_transport::StreamId;
use std::fmt::Debug;
use std::io::Write;
// Wire-level HTTP/3 frame type code.
pub(crate) type HFrameType = u64;
// HTTP/3 frame type codes (RFC 9114), plus the PRIORITY_UPDATE
// extension types (RFC 9218).
pub const H3_FRAME_TYPE_DATA: HFrameType = 0x0;
pub const H3_FRAME_TYPE_HEADERS: HFrameType = 0x1;
pub const H3_FRAME_TYPE_CANCEL_PUSH: HFrameType = 0x3;
pub const H3_FRAME_TYPE_SETTINGS: HFrameType = 0x4;
pub const H3_FRAME_TYPE_PUSH_PROMISE: HFrameType = 0x5;
pub const H3_FRAME_TYPE_GOAWAY: HFrameType = 0x7;
pub const H3_FRAME_TYPE_MAX_PUSH_ID: HFrameType = 0xd;
pub const H3_FRAME_TYPE_PRIORITY_UPDATE_REQUEST: HFrameType = 0xf0700;
pub const H3_FRAME_TYPE_PRIORITY_UPDATE_PUSH: HFrameType = 0xf0701;
// Frame types reserved in HTTP/3; receiving one is a protocol error.
pub const H3_RESERVED_FRAME_TYPES: &[HFrameType] = &[0x2, 0x6, 0x8, 0x9];
// data for DATA frame is not read into HFrame::Data.
/// A decoded HTTP/3 frame. Payload bytes of DATA frames stay on the
/// stream; only the length is captured.
#[derive(PartialEq, Debug)]
pub enum HFrame {
    Data {
        len: u64, // length of the data
    },
    Headers {
        header_block: Vec<u8>,
    },
    CancelPush {
        push_id: u64,
    },
    Settings {
        settings: HSettings,
    },
    PushPromise {
        push_id: u64,
        header_block: Vec<u8>,
    },
    Goaway {
        stream_id: StreamId,
    },
    MaxPushId {
        push_id: u64,
    },
    // A greasing frame: random reserved type with a random short payload.
    Grease,
    PriorityUpdateRequest {
        element_id: u64,
        priority: Priority,
    },
    PriorityUpdatePush {
        element_id: u64,
        priority: Priority,
    },
}
impl HFrame {
    /// The wire type code for this frame. `Grease` draws a fresh random
    /// reserved type of the form `0x1f * N + 0x21` on every call.
    fn get_type(&self) -> HFrameType {
        match self {
            Self::Data { .. } => H3_FRAME_TYPE_DATA,
            Self::Headers { .. } => H3_FRAME_TYPE_HEADERS,
            Self::CancelPush { .. } => H3_FRAME_TYPE_CANCEL_PUSH,
            Self::Settings { .. } => H3_FRAME_TYPE_SETTINGS,
            Self::PushPromise { .. } => H3_FRAME_TYPE_PUSH_PROMISE,
            Self::Goaway { .. } => H3_FRAME_TYPE_GOAWAY,
            Self::MaxPushId { .. } => H3_FRAME_TYPE_MAX_PUSH_ID,
            Self::PriorityUpdateRequest { .. } => H3_FRAME_TYPE_PRIORITY_UPDATE_REQUEST,
            Self::PriorityUpdatePush { .. } => H3_FRAME_TYPE_PRIORITY_UPDATE_PUSH,
            Self::Grease => {
                // 7 random bytes select the reserved type uniformly.
                let r = random(7);
                Decoder::from(&r).decode_uint(7).unwrap() * 0x1f + 0x21
            }
        }
    }
    /// Append the frame (type, length, payload) to `enc`.
    pub fn encode(&self, enc: &mut Encoder) {
        enc.encode_varint(self.get_type());
        match self {
            Self::Data { len } => {
                // DATA frame only encode the length here.
                enc.encode_varint(*len);
            }
            Self::Headers { header_block } => {
                enc.encode_vvec(header_block);
            }
            Self::CancelPush { push_id } => {
                enc.encode_vvec_with(|enc_inner| {
                    enc_inner.encode_varint(*push_id);
                });
            }
            Self::Settings { settings } => {
                settings.encode_frame_contents(enc);
            }
            Self::PushPromise {
                push_id,
                header_block,
            } => {
                // The frame length covers the push-id varint plus the
                // header block.
                enc.encode_varint((header_block.len() + (Encoder::varint_len(*push_id))) as u64);
                enc.encode_varint(*push_id);
                enc.encode(header_block);
            }
            Self::Goaway { stream_id } => {
                enc.encode_vvec_with(|enc_inner| {
                    enc_inner.encode_varint(stream_id.as_u64());
                });
            }
            Self::MaxPushId { push_id } => {
                enc.encode_vvec_with(|enc_inner| {
                    enc_inner.encode_varint(*push_id);
                });
            }
            Self::Grease => {
                // Encode some number of random bytes (0..=7 of them).
                let r = random(8);
                enc.encode_vvec(&r[1..usize::from(1 + (r[0] & 0x7))]);
            }
            Self::PriorityUpdateRequest {
                element_id,
                priority,
            }
            | Self::PriorityUpdatePush {
                element_id,
                priority,
            } => {
                // Payload is the element id followed by the textual
                // priority field value (e.g. "u=6,i").
                let mut update_frame = Encoder::new();
                update_frame.encode_varint(*element_id);
                let mut priority_enc: Vec<u8> = Vec::new();
                write!(priority_enc, "{}", priority).unwrap();
                update_frame.encode(&priority_enc);
                enc.encode_varint(update_frame.len() as u64);
                enc.encode(&update_frame);
            }
        }
    }
}
impl FrameDecoder<HFrame> for HFrame {
    // Reject the reserved frame types; receiving one is a protocol error.
    fn frame_type_allowed(frame_type: u64) -> Res<()> {
        if H3_RESERVED_FRAME_TYPES.contains(&frame_type) {
            return Err(Error::HttpFrameUnexpected);
        }
        Ok(())
    }
    /// Decode a frame from its payload. DATA is special-cased: only the
    /// length is recorded and the payload stays on the stream.
    fn decode(frame_type: u64, frame_len: u64, data: Option<&[u8]>) -> Res<Option<HFrame>> {
        if frame_type == H3_FRAME_TYPE_DATA {
            Ok(Some(HFrame::Data { len: frame_len }))
        } else if let Some(payload) = data {
            let mut dec = Decoder::from(payload);
            Ok(match frame_type {
                H3_FRAME_TYPE_DATA => unreachable!("DATA frame has been handled already."),
                H3_FRAME_TYPE_HEADERS => Some(HFrame::Headers {
                    header_block: dec.decode_remainder().to_vec(),
                }),
                H3_FRAME_TYPE_CANCEL_PUSH => Some(HFrame::CancelPush {
                    push_id: dec.decode_varint().ok_or(Error::HttpFrame)?,
                }),
                H3_FRAME_TYPE_SETTINGS => {
                    let mut settings = HSettings::default();
                    settings.decode_frame_contents(&mut dec).map_err(|e| {
                        // Keep HttpSettings as-is; map everything else to
                        // a generic frame error.
                        if e == Error::HttpSettings {
                            e
                        } else {
                            Error::HttpFrame
                        }
                    })?;
                    Some(HFrame::Settings { settings })
                }
                H3_FRAME_TYPE_PUSH_PROMISE => Some(HFrame::PushPromise {
                    push_id: dec.decode_varint().ok_or(Error::HttpFrame)?,
                    header_block: dec.decode_remainder().to_vec(),
                }),
                H3_FRAME_TYPE_GOAWAY => Some(HFrame::Goaway {
                    stream_id: StreamId::new(dec.decode_varint().ok_or(Error::HttpFrame)?),
                }),
                H3_FRAME_TYPE_MAX_PUSH_ID => Some(HFrame::MaxPushId {
                    push_id: dec.decode_varint().ok_or(Error::HttpFrame)?,
                }),
                H3_FRAME_TYPE_PRIORITY_UPDATE_REQUEST | H3_FRAME_TYPE_PRIORITY_UPDATE_PUSH => {
                    // element id varint, then the textual priority value.
                    let element_id = dec.decode_varint().ok_or(Error::HttpFrame)?;
                    let priority = dec.decode_remainder();
                    let priority = Priority::from_bytes(priority)?;
                    if frame_type == H3_FRAME_TYPE_PRIORITY_UPDATE_REQUEST {
                        Some(HFrame::PriorityUpdateRequest {
                            element_id,
                            priority,
                        })
                    } else {
                        Some(HFrame::PriorityUpdatePush {
                            element_id,
                            priority,
                        })
                    }
                }
                // Unknown frame types are skipped by the caller.
                _ => None,
            })
        } else {
            // Payload not available yet.
            Ok(None)
        }
    }
    // Frame types whose payload should be buffered and decoded.
    fn is_known_type(frame_type: u64) -> bool {
        matches!(
            frame_type,
            H3_FRAME_TYPE_DATA
                | H3_FRAME_TYPE_HEADERS
                | H3_FRAME_TYPE_CANCEL_PUSH
                | H3_FRAME_TYPE_SETTINGS
                | H3_FRAME_TYPE_PUSH_PROMISE
                | H3_FRAME_TYPE_GOAWAY
                | H3_FRAME_TYPE_MAX_PUSH_ID
                | H3_FRAME_TYPE_PRIORITY_UPDATE_REQUEST
                | H3_FRAME_TYPE_PRIORITY_UPDATE_PUSH
        )
    }
}

View file

@ -0,0 +1,16 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub mod hframe;
pub mod reader;
pub mod wtframe;
pub use hframe::{HFrame, H3_FRAME_TYPE_HEADERS, H3_FRAME_TYPE_SETTINGS, H3_RESERVED_FRAME_TYPES};
pub use reader::{FrameReader, StreamReaderConnectionWrapper, StreamReaderRecvStreamWrapper};
pub use wtframe::WebTransportFrame;
#[cfg(test)]
mod tests;

View file

@ -0,0 +1,270 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(clippy::module_name_repetitions)]
use crate::{Error, RecvStream, Res};
use neqo_common::{
hex_with_len, qtrace, Decoder, IncrementalDecoderBuffer, IncrementalDecoderIgnore,
IncrementalDecoderUint,
};
use neqo_transport::{Connection, StreamId};
use std::convert::TryFrom;
use std::fmt::Debug;
const MAX_READ_SIZE: usize = 4096;
/// Per-frame-type decoding logic plugged into `FrameReader`.
pub trait FrameDecoder<T> {
    /// Whether the payload of `frame_type` should be buffered and decoded
    /// (as opposed to being discarded as an unknown type).
    fn is_known_type(frame_type: u64) -> bool;
    /// # Errors
    /// Returns `HttpFrameUnexpected` if the frame type is not allowed, i.e. is a `H3_RESERVED_FRAME_TYPES`.
    fn frame_type_allowed(_frame_type: u64) -> Res<()> {
        Ok(())
    }
    /// # Errors
    /// If a frame cannot be properly decoded.
    fn decode(frame_type: u64, frame_len: u64, data: Option<&[u8]>) -> Res<Option<T>>;
}
/// Abstraction over the source of stream bytes (a raw transport stream
/// or an http/3 recv stream).
pub trait StreamReader {
    /// Read into `buf`, returning the amount read and whether the stream
    /// was closed (fin).
    /// # Errors
    /// An error may happen while reading a stream, e.g. early close, protocol error, etc.
    /// Return an error if the stream was closed on the transport layer, but that information is not yet
    /// consumed on the http/3 layer.
    fn read_data(&mut self, buf: &mut [u8]) -> Res<(usize, bool)>;
}
/// `StreamReader` that reads directly from a transport stream.
pub struct StreamReaderConnectionWrapper<'a> {
    conn: &'a mut Connection,
    stream_id: StreamId,
}
impl<'a> StreamReaderConnectionWrapper<'a> {
    /// Wrap `stream_id` of `conn` for reading.
    pub fn new(conn: &'a mut Connection, stream_id: StreamId) -> Self {
        Self { conn, stream_id }
    }
}
impl<'a> StreamReader for StreamReaderConnectionWrapper<'a> {
    /// Read from the wrapped transport stream.
    /// # Errors
    /// An error may happen while reading a stream, e.g. early close, protocol error, etc.
    fn read_data(&mut self, buf: &mut [u8]) -> Res<(usize, bool)> {
        Ok(self.conn.stream_recv(self.stream_id, buf)?)
    }
}
/// `StreamReader` that reads through an http/3 `RecvStream`.
pub struct StreamReaderRecvStreamWrapper<'a> {
    recv_stream: &'a mut Box<dyn RecvStream>,
    conn: &'a mut Connection,
}
impl<'a> StreamReaderRecvStreamWrapper<'a> {
    /// Wrap `recv_stream` (and the connection it reads from).
    pub fn new(conn: &'a mut Connection, recv_stream: &'a mut Box<dyn RecvStream>) -> Self {
        Self { recv_stream, conn }
    }
}
impl<'a> StreamReader for StreamReaderRecvStreamWrapper<'a> {
    /// Read via the wrapped http/3 recv stream.
    /// # Errors
    /// An error may happen while reading a stream, e.g. early close, protocol error, etc.
    fn read_data(&mut self, buf: &mut [u8]) -> Res<(usize, bool)> {
        self.recv_stream.read_data(self.conn, buf)
    }
}
// The incremental decoding state: frame type, then length, then either
// buffering a known frame's payload or discarding an unknown frame's.
#[derive(Clone, Debug)]
enum FrameReaderState {
    GetType { decoder: IncrementalDecoderUint },
    GetLength { decoder: IncrementalDecoderUint },
    GetData { decoder: IncrementalDecoderBuffer },
    UnknownFrameDischargeData { decoder: IncrementalDecoderIgnore },
}
/// Incrementally reads frames (generic over the frame type via
/// `FrameDecoder`) from a stream that may deliver bytes piecemeal.
#[allow(clippy::module_name_repetitions)]
#[derive(Debug)]
pub struct FrameReader {
    state: FrameReaderState,
    // Type and length of the frame currently being decoded.
    frame_type: u64,
    frame_len: u64,
}
impl Default for FrameReader {
    fn default() -> Self {
        Self::new()
    }
}
impl FrameReader {
    /// Create a reader that starts by decoding the frame type.
    #[must_use]
    pub fn new() -> Self {
        Self {
            state: FrameReaderState::GetType {
                decoder: IncrementalDecoderUint::default(),
            },
            frame_type: 0,
            frame_len: 0,
        }
    }
    /// Create a reader for a stream whose frame type is already known,
    /// so decoding starts at the length field.
    #[must_use]
    pub fn new_with_type(frame_type: u64) -> Self {
        Self {
            state: FrameReaderState::GetLength {
                decoder: IncrementalDecoderUint::default(),
            },
            frame_type,
            frame_len: 0,
        }
    }
    // Prepare for the next frame on the stream.
    fn reset(&mut self) {
        self.state = FrameReaderState::GetType {
            decoder: IncrementalDecoderUint::default(),
        };
    }
    // Minimum number of bytes the current decoder still needs; used to
    // bound the size of the next read.
    fn min_remaining(&self) -> usize {
        match &self.state {
            FrameReaderState::GetType { decoder } | FrameReaderState::GetLength { decoder } => {
                decoder.min_remaining()
            }
            FrameReaderState::GetData { decoder } => decoder.min_remaining(),
            FrameReaderState::UnknownFrameDischargeData { decoder } => decoder.min_remaining(),
        }
    }
    // True unless we are at a clean frame boundary (waiting for a type
    // with nothing consumed); a fin mid-frame is a malformed frame.
    fn decoding_in_progress(&self) -> bool {
        if let FrameReaderState::GetType { decoder } = &self.state {
            decoder.decoding_in_progress()
        } else {
            true
        }
    }
    /// Read from `stream_reader` until a full frame is decoded, the
    /// stream is closed, or no more data is available. The returned
    /// `bool` is true if the quic stream was closed (fin).
    /// # Errors
    /// May return `HttpFrame` if a frame cannot be decoded.
    /// and `TransportStreamDoesNotExist` if `stream_recv` fails.
    pub fn receive<T: FrameDecoder<T>>(
        &mut self,
        stream_reader: &mut dyn StreamReader,
    ) -> Res<(Option<T>, bool)> {
        loop {
            let to_read = std::cmp::min(self.min_remaining(), MAX_READ_SIZE);
            let mut buf = vec![0; to_read];
            let (output, read, fin) = match stream_reader
                .read_data(&mut buf)
                .map_err(|e| Error::map_stream_recv_errors(&e))?
            {
                (0, f) => (None, false, f),
                (amount, f) => {
                    qtrace!("FrameReader::receive: reading {} byte, fin={}", amount, f);
                    (self.consume::<T>(Decoder::from(&buf[..amount]))?, true, f)
                }
            };
            if output.is_some() {
                break Ok((output, fin));
            }
            if fin {
                // The stream ended in the middle of a frame.
                if self.decoding_in_progress() {
                    break Err(Error::HttpFrame);
                }
                break Ok((None, fin));
            }
            if !read {
                // There was no new data, exit the loop.
                break Ok((None, false));
            }
        }
    }
    /// Feed freshly read bytes into the current decoding state.
    /// # Errors
    /// May return `HttpFrame` if a frame cannot be decoded.
    fn consume<T: FrameDecoder<T>>(&mut self, mut input: Decoder) -> Res<Option<T>> {
        match &mut self.state {
            FrameReaderState::GetType { decoder } => {
                if let Some(v) = decoder.consume(&mut input) {
                    qtrace!("FrameReader::receive: read frame type {}", v);
                    self.frame_type_decoded::<T>(v)?;
                }
            }
            FrameReaderState::GetLength { decoder } => {
                if let Some(len) = decoder.consume(&mut input) {
                    qtrace!(
                        "FrameReader::receive: frame type {} length {}",
                        self.frame_type,
                        len
                    );
                    return self.frame_length_decoded::<T>(len);
                }
            }
            FrameReaderState::GetData { decoder } => {
                if let Some(data) = decoder.consume(&mut input) {
                    qtrace!(
                        "received frame {}: {}",
                        self.frame_type,
                        hex_with_len(&data[..])
                    );
                    return self.frame_data_decoded::<T>(&data);
                }
            }
            FrameReaderState::UnknownFrameDischargeData { decoder } => {
                // Unknown payloads are discarded without buffering.
                if decoder.consume(&mut input) {
                    self.reset();
                }
            }
        }
        Ok(None)
    }
}
impl FrameReader {
    // Frame type decoded: move on to the length field, unless the type
    // is forbidden on this stream.
    fn frame_type_decoded<T: FrameDecoder<T>>(&mut self, frame_type: u64) -> Res<()> {
        T::frame_type_allowed(frame_type)?;
        self.frame_type = frame_type;
        self.state = FrameReaderState::GetLength {
            decoder: IncrementalDecoderUint::default(),
        };
        Ok(())
    }
    // Length decoded: decide how to handle the payload.
    fn frame_length_decoded<T: FrameDecoder<T>>(&mut self, len: u64) -> Res<Option<T>> {
        self.frame_len = len;
        if let Some(f) = T::decode(
            self.frame_type,
            self.frame_len,
            // Zero-length frames get an empty payload so they can decode
            // immediately; otherwise the payload hasn't been read yet.
            if len > 0 { None } else { Some(&[]) },
        )? {
            self.reset();
            return Ok(Some(f));
        } else if T::is_known_type(self.frame_type) {
            // Buffer the payload of a known frame type.
            self.state = FrameReaderState::GetData {
                decoder: IncrementalDecoderBuffer::new(
                    usize::try_from(len).or(Err(Error::HttpFrame))?,
                ),
            };
        } else if self.frame_len == 0 {
            self.reset();
        } else {
            // Unknown frame type: skip its payload without buffering.
            self.state = FrameReaderState::UnknownFrameDischargeData {
                decoder: IncrementalDecoderIgnore::new(
                    usize::try_from(len).or(Err(Error::HttpFrame))?,
                ),
            };
        }
        Ok(None)
    }
    // Full payload available: decode the frame and reset for the next.
    fn frame_data_decoded<T: FrameDecoder<T>>(&mut self, data: &[u8]) -> Res<Option<T>> {
        let res = T::decode(self.frame_type, self.frame_len, Some(data))?;
        self.reset();
        Ok(res)
    }
}

View file

@ -0,0 +1,115 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::enc_dec_hframe;
use crate::{
frames::HFrame,
settings::{HSetting, HSettingType, HSettings},
Priority,
};
use neqo_common::{Decoder, Encoder};
use neqo_transport::StreamId;
use test_fixture::fixture_init;
// DATA encodes only type + length; the 3 payload bytes stay unread.
#[test]
fn test_data_frame() {
    let f = HFrame::Data { len: 3 };
    enc_dec_hframe(&f, "0003010203", 3);
}
// HEADERS: type 0x1, length 3, then the header block bytes.
#[test]
fn test_headers_frame() {
    let f = HFrame::Headers {
        header_block: vec![0x01, 0x02, 0x03],
    };
    enc_dec_hframe(&f, "0103010203", 0);
}
// CANCEL_PUSH: type 0x3, length 1, push id varint.
#[test]
fn test_cancel_push_frame4() {
    let f = HFrame::CancelPush { push_id: 5 };
    enc_dec_hframe(&f, "030105", 0);
}
// SETTINGS with a single MaxHeaderListSize=4 entry.
#[test]
fn test_settings_frame4() {
    let f = HFrame::Settings {
        settings: HSettings::new(&[HSetting::new(HSettingType::MaxHeaderListSize, 4)]),
    };
    enc_dec_hframe(&f, "04020604", 0);
}
// PUSH_PROMISE: length covers the push id varint plus the header block.
#[test]
fn test_push_promise_frame4() {
    let f = HFrame::PushPromise {
        push_id: 4,
        header_block: vec![0x61, 0x62, 0x63, 0x64],
    };
    enc_dec_hframe(&f, "05050461626364", 0);
}
// GOAWAY: type 0x7, length 1, stream id varint.
#[test]
fn test_goaway_frame4() {
    let f = HFrame::Goaway {
        stream_id: StreamId::new(5),
    };
    enc_dec_hframe(&f, "070105", 0);
}
// Grease frames use a random reserved type (0x1f * N + 0x21) and a
// short random body; two encodings should differ.
#[test]
fn grease() {
    fn make_grease() -> u64 {
        let mut enc = Encoder::default();
        HFrame::Grease.encode(&mut enc);
        let mut dec = Decoder::from(&enc);
        let ft = dec.decode_varint().unwrap();
        // The type must have the reserved/grease form.
        assert_eq!((ft - 0x21) % 0x1f, 0);
        let body = dec.decode_vvec().unwrap();
        assert!(body.len() <= 7);
        ft
    }
    fixture_init();
    let t1 = make_grease();
    let t2 = make_grease();
    assert_ne!(t1, t2);
}
// Default priority serializes to an empty field value; payload is just
// the element id.
#[test]
fn test_priority_update_request_default() {
    let f = HFrame::PriorityUpdateRequest {
        element_id: 6,
        priority: Priority::default(),
    };
    enc_dec_hframe(&f, "800f07000106", 0);
}
// Non-default urgency with default incremental serializes as "u=6".
#[test]
fn test_priority_update_request_incremental_default() {
    let f = HFrame::PriorityUpdateRequest {
        element_id: 7,
        priority: Priority::new(6, false),
    };
    enc_dec_hframe(&f, "800f07000407753d36", 0); // "u=6"
}
// Default urgency with incremental set serializes as just "i".
#[test]
fn test_priority_update_request_urgency_default() {
    let f = HFrame::PriorityUpdateRequest {
        element_id: 8,
        priority: Priority::new(3, true),
    };
    enc_dec_hframe(&f, "800f0700020869", 0); // "i"
}
// PRIORITY_UPDATE for a push stream with default priority.
#[test]
fn test_priority_update_push_default() {
    let f = HFrame::PriorityUpdatePush {
        element_id: 10,
        priority: Priority::default(),
    };
    enc_dec_hframe(&f, "800f0701010a", 0);
}

View file

@ -0,0 +1,82 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use crate::frames::{
reader::FrameDecoder, FrameReader, HFrame, StreamReaderConnectionWrapper, WebTransportFrame,
};
use neqo_common::Encoder;
use neqo_crypto::AuthenticationStatus;
use neqo_transport::StreamType;
use std::mem;
use test_fixture::{default_client, default_server, now};
// Check that `d` encodes to the hex string `st`, then send `st` over a
// real connection and decode it with a `FrameReader`; `remaining` bytes
// must be left unread on the stream afterwards.
#[allow(clippy::many_single_char_names)]
pub fn enc_dec<T: FrameDecoder<T>>(d: &Encoder, st: &str, remaining: usize) -> T {
    // For data, headers and push_promise we do not read all bytes from the buffer
    let d2 = Encoder::from_hex(st);
    assert_eq!(&d[..], &d2[..d.len()]);
    // Complete the client/server handshake.
    let mut conn_c = default_client();
    let mut conn_s = default_server();
    let out = conn_c.process(None, now());
    let out = conn_s.process(out.dgram(), now());
    let out = conn_c.process(out.dgram(), now());
    mem::drop(conn_s.process(out.dgram(), now()));
    conn_c.authenticated(AuthenticationStatus::Ok, now());
    let out = conn_c.process(None, now());
    mem::drop(conn_s.process(out.dgram(), now()));
    // create a stream
    let stream_id = conn_s.stream_create(StreamType::BiDi).unwrap();
    let mut fr: FrameReader = FrameReader::new();
    // convert the hex string into a byte vector
    let buf = Encoder::from_hex(st);
    conn_s.stream_send(stream_id, &buf[..]).unwrap();
    let out = conn_s.process(None, now());
    mem::drop(conn_c.process(out.dgram(), now()));
    let (frame, fin) = fr
        .receive::<T>(&mut StreamReaderConnectionWrapper::new(
            &mut conn_c,
            stream_id,
        ))
        .unwrap();
    assert!(!fin);
    assert!(frame.is_some());
    // Check remaining data.
    let mut buf = [0_u8; 100];
    let (amount, _) = conn_c.stream_recv(stream_id, &mut buf).unwrap();
    assert_eq!(amount, remaining);
    frame.unwrap()
}
// Round-trip an HTTP/3 frame through encode + stream + decode.
pub fn enc_dec_hframe(f: &HFrame, st: &str, remaining: usize) {
    let mut d = Encoder::default();
    f.encode(&mut d);
    let frame = enc_dec::<HFrame>(&d, st, remaining);
    assert_eq!(*f, frame);
}
// Round-trip a WebTransport frame through encode + stream + decode.
pub fn enc_dec_wtframe(f: &WebTransportFrame, st: &str, remaining: usize) {
    let mut d = Encoder::default();
    f.encode(&mut d);
    let frame = enc_dec::<WebTransportFrame>(&d, st, remaining);
    assert_eq!(*f, frame);
}
mod hframe;
mod reader;
mod wtframe;

View file

@ -0,0 +1,517 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use crate::{
frames::{
reader::FrameDecoder, FrameReader, HFrame, StreamReaderConnectionWrapper, WebTransportFrame,
},
settings::{HSetting, HSettingType, HSettings},
Error,
};
use neqo_common::Encoder;
use neqo_transport::{Connection, StreamId, StreamType};
use std::fmt::Debug;
use std::mem;
use test_fixture::{connect, now};
// A connected client/server pair plus a frame reader over one
// server-initiated stream, used by the tests below.
struct FrameReaderTest {
    pub fr: FrameReader,
    pub conn_c: Connection,
    pub conn_s: Connection,
    pub stream_id: StreamId,
}
impl FrameReaderTest {
    // Set up a connected pair with one bidirectional stream to carry
    // the frame bytes.
    pub fn new() -> Self {
        let (conn_c, mut conn_s) = connect();
        let stream_id = conn_s.stream_create(StreamType::BiDi).unwrap();
        Self {
            fr: FrameReader::new(),
            conn_c,
            conn_s,
            stream_id,
        }
    }
    // Send `v` from the server, deliver it to the client, and attempt to
    // read a frame; the stream must not have been closed.
    fn process<T: FrameDecoder<T>>(&mut self, v: &[u8]) -> Option<T> {
        self.conn_s.stream_send(self.stream_id, v).unwrap();
        let out = self.conn_s.process(None, now());
        mem::drop(self.conn_c.process(out.dgram(), now()));
        let (frame, fin) = self
            .fr
            .receive::<T>(&mut StreamReaderConnectionWrapper::new(
                &mut self.conn_c,
                self.stream_id,
            ))
            .unwrap();
        assert!(!fin);
        frame
    }
}
// Test receiving byte by byte for a SETTINGS frame.
#[test]
fn test_frame_reading_with_stream_settings1() {
    let mut fr = FrameReaderTest::new();
    // Send and read settings frame 040406040804
    assert!(fr.process::<HFrame>(&[0x4]).is_none());
    assert!(fr.process::<HFrame>(&[0x4]).is_none());
    assert!(fr.process::<HFrame>(&[0x6]).is_none());
    assert!(fr.process::<HFrame>(&[0x4]).is_none());
    assert!(fr.process::<HFrame>(&[0x8]).is_none());
    // Only the final byte completes the frame.
    let frame = fr.process(&[0x4]);
    assert!(frame.is_some());
    if let HFrame::Settings { settings } = frame.unwrap() {
        assert!(settings.len() == 1);
        assert!(settings[0] == HSetting::new(HSettingType::MaxHeaderListSize, 4));
    } else {
        panic!("wrong frame type");
    }
}
// Test receiving byte by byte for a SETTINGS frame with larger varints
#[test]
fn test_frame_reading_with_stream_settings2() {
    let mut fr = FrameReaderTest::new();
    // Read settings frame 400406064004084100
    for i in &[0x40, 0x04, 0x06, 0x06, 0x40, 0x04, 0x08, 0x41] {
        assert!(fr.process::<HFrame>(&[*i]).is_none());
    }
    // Only the final byte completes the frame.
    let frame = fr.process(&[0x0]);
    assert!(frame.is_some());
    if let HFrame::Settings { settings } = frame.unwrap() {
        assert!(settings.len() == 1);
        assert!(settings[0] == HSetting::new(HSettingType::MaxHeaderListSize, 4));
    } else {
        panic!("wrong frame type");
    }
}
// Test receiving byte by byte for a PUSH_PROMISE frame.
#[test]
fn test_frame_reading_with_stream_push_promise() {
    let mut fr = FrameReaderTest::new();
    // Read push-promise frame 05054101010203
    for i in &[0x05, 0x05, 0x41, 0x01, 0x01, 0x02] {
        assert!(fr.process::<HFrame>(&[*i]).is_none());
    }
    // The last payload byte completes the frame.
    let frame = fr.process(&[0x3]);
    assert!(frame.is_some());
    if let HFrame::PushPromise {
        push_id,
        header_block,
    } = frame.unwrap()
    {
        assert_eq!(push_id, 257);
        assert_eq!(header_block, &[0x1, 0x2, 0x3]);
    } else {
        panic!("wrong frame type");
    }
}
// Test DATA
#[test]
fn test_frame_reading_with_stream_data() {
    let mut fr = FrameReaderTest::new();
    // Read data frame 0003010203
    let frame = fr.process(&[0x0, 0x3, 0x1, 0x2, 0x3]).unwrap();
    assert!(matches!(frame, HFrame::Data { len } if len == 3));
    // payload is still on the stream.
    // assert that we have 3 bytes in the stream
    let mut buf = [0_u8; 100];
    let (amount, _) = fr.conn_c.stream_recv(fr.stream_id, &mut buf).unwrap();
    assert_eq!(amount, 3);
}
// Test an unknown frame
#[test]
fn test_unknown_frame() {
    // Construct an unknown frame.
    const UNKNOWN_FRAME_LEN: usize = 832;
    let mut fr = FrameReaderTest::new();
    let mut enc = Encoder::with_capacity(UNKNOWN_FRAME_LEN + 4);
    enc.encode_varint(1028_u64); // Arbitrary type.
    enc.encode_varint(UNKNOWN_FRAME_LEN as u64);
    let mut buf: Vec<_> = enc.into();
    buf.resize(UNKNOWN_FRAME_LEN + buf.len(), 0);
    // The unknown frame is silently discarded.
    assert!(fr.process::<HFrame>(&buf).is_none());
    // now receive a CANCEL_PUSH frame to see that frame reader is ok.
    let frame = fr.process(&[0x03, 0x01, 0x05]);
    assert!(frame.is_some());
    if let HFrame::CancelPush { push_id } = frame.unwrap() {
        assert!(push_id == 5);
    } else {
        panic!("wrong frame type");
    }
}
// Test receiving byte by byte for a WT_FRAME_CLOSE_SESSION frame.
#[test]
fn test_frame_reading_with_stream_wt_close_session() {
    let mut fr = FrameReaderTest::new();
    // Read CloseSession frame 6843090000000548656c6c6f
    for i in &[
        0x68, 0x43, 0x09, 0x00, 0x00, 0x00, 0x05, 0x48, 0x65, 0x6c, 0x6c,
    ] {
        assert!(fr.process::<WebTransportFrame>(&[*i]).is_none());
    }
    // The final byte ('o') completes the frame.
    let frame = fr.process::<WebTransportFrame>(&[0x6f]);
    assert!(frame.is_some());
    let WebTransportFrame::CloseSession { error, message } = frame.unwrap();
    assert_eq!(error, 5);
    assert_eq!(message, "Hello".to_string());
}
// Test an unknown frame for WebTransportFrames.
#[test]
fn test_unknown_wt_frame() {
    // Construct an unknown frame.
    const UNKNOWN_FRAME_LEN: usize = 832;
    let mut fr = FrameReaderTest::new();
    let mut enc = Encoder::with_capacity(UNKNOWN_FRAME_LEN + 4);
    enc.encode_varint(1028_u64); // Arbitrary type.
    enc.encode_varint(UNKNOWN_FRAME_LEN as u64);
    let mut buf: Vec<_> = enc.into();
    buf.resize(UNKNOWN_FRAME_LEN + buf.len(), 0);
    // The unknown frame is silently discarded.
    assert!(fr.process::<WebTransportFrame>(&buf).is_none());
    // now receive a WT_FRAME_CLOSE_SESSION frame to see that frame reader is ok.
    let frame = fr.process(&[
        0x68, 0x43, 0x09, 0x00, 0x00, 0x00, 0x05, 0x48, 0x65, 0x6c, 0x6c, 0x6f,
    ]);
    assert!(frame.is_some());
    let WebTransportFrame::CloseSession { error, message } = frame.unwrap();
    assert_eq!(error, 5);
    assert_eq!(message, "Hello".to_string());
}
// How the test data and the stream fin are delivered.
enum FrameReadingTestSend {
    OnlyData,
    DataWithFin,  // fin bundled with the data packet
    DataThenFin,  // fin delivered in a separate packet
}
// Expected outcome of a `FrameReader::receive` call.
enum FrameReadingTestExpect {
    Error,
    Incomplete,
    FrameComplete,
    FrameAndStreamComplete,
    StreamDoneWithoutFrame,
}
// Send `buf` per `test_to_send` and check the reader's result against
// `expected_result`.
fn test_reading_frame<T: FrameDecoder<T> + PartialEq + Debug>(
    buf: &[u8],
    test_to_send: &FrameReadingTestSend,
    expected_result: &FrameReadingTestExpect,
) {
    let mut fr = FrameReaderTest::new();
    fr.conn_s.stream_send(fr.stream_id, buf).unwrap();
    if let FrameReadingTestSend::DataWithFin = test_to_send {
        fr.conn_s.stream_close_send(fr.stream_id).unwrap();
    }
    let out = fr.conn_s.process(None, now());
    mem::drop(fr.conn_c.process(out.dgram(), now()));
    if let FrameReadingTestSend::DataThenFin = test_to_send {
        // Deliver the fin in its own packet after the data.
        fr.conn_s.stream_close_send(fr.stream_id).unwrap();
        let out = fr.conn_s.process(None, now());
        mem::drop(fr.conn_c.process(out.dgram(), now()));
    }
    let rv = fr.fr.receive::<T>(&mut StreamReaderConnectionWrapper::new(
        &mut fr.conn_c,
        fr.stream_id,
    ));
    match expected_result {
        FrameReadingTestExpect::Error => assert_eq!(Err(Error::HttpFrame), rv),
        FrameReadingTestExpect::Incomplete => {
            assert_eq!(Ok((None, false)), rv);
        }
        FrameReadingTestExpect::FrameComplete => {
            let (f, fin) = rv.unwrap();
            assert!(!fin);
            assert!(f.is_some());
        }
        FrameReadingTestExpect::FrameAndStreamComplete => {
            let (f, fin) = rv.unwrap();
            assert!(fin);
            assert!(f.is_some());
        }
        FrameReadingTestExpect::StreamDoneWithoutFrame => {
            let (f, fin) = rv.unwrap();
            assert!(fin);
            assert!(f.is_none());
        }
    };
}
// An unknown frame should be skipped; a fin in the middle of its header
// or payload is an error, while a fin at its end leaves no frame.
#[test]
fn test_complete_and_incomplete_unknown_frame() {
    // Construct an unknown frame.
    const UNKNOWN_FRAME_LEN: usize = 832;
    let mut enc = Encoder::with_capacity(UNKNOWN_FRAME_LEN + 4);
    enc.encode_varint(1028_u64); // Arbitrary type.
    enc.encode_varint(UNKNOWN_FRAME_LEN as u64);
    let mut buf: Vec<_> = enc.into();
    buf.resize(UNKNOWN_FRAME_LEN + buf.len(), 0);
    // Partial prefixes of the frame.
    let len = std::cmp::min(buf.len() - 1, 10);
    for i in 1..len {
        test_reading_frame::<HFrame>(
            &buf[..i],
            &FrameReadingTestSend::OnlyData,
            &FrameReadingTestExpect::Incomplete,
        );
        test_reading_frame::<HFrame>(
            &buf[..i],
            &FrameReadingTestSend::DataWithFin,
            &FrameReadingTestExpect::Error,
        );
        test_reading_frame::<HFrame>(
            &buf[..i],
            &FrameReadingTestSend::DataThenFin,
            &FrameReadingTestExpect::Error,
        );
    }
    // The complete frame.
    test_reading_frame::<HFrame>(
        &buf,
        &FrameReadingTestSend::OnlyData,
        &FrameReadingTestExpect::Incomplete,
    );
    test_reading_frame::<HFrame>(
        &buf,
        &FrameReadingTestSend::DataWithFin,
        &FrameReadingTestExpect::StreamDoneWithoutFrame,
    );
    test_reading_frame::<HFrame>(
        &buf,
        &FrameReadingTestSend::DataThenFin,
        &FrameReadingTestExpect::StreamDoneWithoutFrame,
    );
}
// If we read at least done_state bytes, FrameReader will be in the done state.
fn test_complete_and_incomplete_frame<T: FrameDecoder<T> + PartialEq + Debug>(
    buf: &[u8],
    done_state: usize,
) {
    use std::cmp::Ordering;
    // Let's consume partial frames. It is enough to test partial frames
    // up to 10 bytes. 10 bytes is greater than frame type and frame
    // length and a bit of data.
    let len = std::cmp::min(buf.len() - 1, 10);
    for i in 1..len {
        // Without a fin, a prefix either completes the frame (>= done_state)
        // or leaves the reader waiting for more data.
        test_reading_frame::<T>(
            &buf[..i],
            &FrameReadingTestSend::OnlyData,
            if i >= done_state {
                &FrameReadingTestExpect::FrameComplete
            } else {
                &FrameReadingTestExpect::Incomplete
            },
        );
        // With a fin: a frame truncated before done_state is an error;
        // exactly done_state means the frame and the stream end together.
        test_reading_frame::<T>(
            &buf[..i],
            &FrameReadingTestSend::DataWithFin,
            match i.cmp(&done_state) {
                Ordering::Greater => &FrameReadingTestExpect::FrameComplete,
                Ordering::Equal => &FrameReadingTestExpect::FrameAndStreamComplete,
                Ordering::Less => &FrameReadingTestExpect::Error,
            },
        );
        // Same expectations when the fin arrives in a separate packet.
        test_reading_frame::<T>(
            &buf[..i],
            &FrameReadingTestSend::DataThenFin,
            match i.cmp(&done_state) {
                Ordering::Greater => &FrameReadingTestExpect::FrameComplete,
                Ordering::Equal => &FrameReadingTestExpect::FrameAndStreamComplete,
                Ordering::Less => &FrameReadingTestExpect::Error,
            },
        );
    }
    // The complete buffer always yields a frame.
    test_reading_frame::<T>(
        buf,
        &FrameReadingTestSend::OnlyData,
        &FrameReadingTestExpect::FrameComplete,
    );
    test_reading_frame::<T>(
        buf,
        &FrameReadingTestSend::DataWithFin,
        if buf.len() == done_state {
            &FrameReadingTestExpect::FrameAndStreamComplete
        } else {
            &FrameReadingTestExpect::FrameComplete
        },
    );
    test_reading_frame::<T>(
        buf,
        &FrameReadingTestSend::DataThenFin,
        if buf.len() == done_state {
            &FrameReadingTestExpect::FrameAndStreamComplete
        } else {
            &FrameReadingTestExpect::FrameComplete
        },
    );
}
#[test]
fn test_complete_and_incomplete_frames() {
    const FRAME_LEN: usize = 10;
    const HEADER_BLOCK: &[u8] = &[0x01, 0x02, 0x03, 0x04];

    // H3_FRAME_TYPE_DATA len=0
    // DATA completes as soon as type + length (2 bytes) are read; the payload
    // stays on the stream.
    let f = HFrame::Data { len: 0 };
    let mut enc = Encoder::with_capacity(2);
    f.encode(&mut enc);
    let buf: Vec<_> = enc.into();
    test_complete_and_incomplete_frame::<HFrame>(&buf, 2);

    // H3_FRAME_TYPE_DATA len=FRAME_LEN
    let f = HFrame::Data {
        len: FRAME_LEN as u64,
    };
    let mut enc = Encoder::with_capacity(2);
    f.encode(&mut enc);
    let mut buf: Vec<_> = enc.into();
    buf.resize(FRAME_LEN + buf.len(), 0);
    test_complete_and_incomplete_frame::<HFrame>(&buf, 2);

    // H3_FRAME_TYPE_HEADERS empty header block
    let f = HFrame::Headers {
        header_block: Vec::new(),
    };
    let mut enc = Encoder::default();
    f.encode(&mut enc);
    let buf: Vec<_> = enc.into();
    test_complete_and_incomplete_frame::<HFrame>(&buf, 2);

    // H3_FRAME_TYPE_HEADERS
    // All remaining frames need the entire encoding before they are complete.
    let f = HFrame::Headers {
        header_block: HEADER_BLOCK.to_vec(),
    };
    let mut enc = Encoder::default();
    f.encode(&mut enc);
    let buf: Vec<_> = enc.into();
    test_complete_and_incomplete_frame::<HFrame>(&buf, buf.len());

    // H3_FRAME_TYPE_CANCEL_PUSH
    let f = HFrame::CancelPush { push_id: 5 };
    let mut enc = Encoder::default();
    f.encode(&mut enc);
    let buf: Vec<_> = enc.into();
    test_complete_and_incomplete_frame::<HFrame>(&buf, buf.len());

    // H3_FRAME_TYPE_SETTINGS
    let f = HFrame::Settings {
        settings: HSettings::new(&[HSetting::new(HSettingType::MaxHeaderListSize, 4)]),
    };
    let mut enc = Encoder::default();
    f.encode(&mut enc);
    let buf: Vec<_> = enc.into();
    test_complete_and_incomplete_frame::<HFrame>(&buf, buf.len());

    // H3_FRAME_TYPE_PUSH_PROMISE
    let f = HFrame::PushPromise {
        push_id: 4,
        header_block: HEADER_BLOCK.to_vec(),
    };
    let mut enc = Encoder::default();
    f.encode(&mut enc);
    let buf: Vec<_> = enc.into();
    test_complete_and_incomplete_frame::<HFrame>(&buf, buf.len());

    // H3_FRAME_TYPE_GOAWAY
    let f = HFrame::Goaway {
        stream_id: StreamId::new(5),
    };
    let mut enc = Encoder::default();
    f.encode(&mut enc);
    let buf: Vec<_> = enc.into();
    test_complete_and_incomplete_frame::<HFrame>(&buf, buf.len());

    // H3_FRAME_TYPE_MAX_PUSH_ID
    let f = HFrame::MaxPushId { push_id: 5 };
    let mut enc = Encoder::default();
    f.encode(&mut enc);
    let buf: Vec<_> = enc.into();
    test_complete_and_incomplete_frame::<HFrame>(&buf, buf.len());
}
#[test]
fn test_complete_and_incomplete_wt_frames() {
    // WT_FRAME_CLOSE_SESSION
    // (The original comment said H3_FRAME_TYPE_MAX_PUSH_ID, a copy-paste error.)
    let f = WebTransportFrame::CloseSession {
        error: 5,
        message: "Hello".to_string(),
    };
    let mut enc = Encoder::default();
    f.encode(&mut enc);
    let buf: Vec<_> = enc.into();
    test_complete_and_incomplete_frame::<WebTransportFrame>(&buf, buf.len());
}
// Test closing a stream before any frame is sent should not cause an error.
#[test]
fn test_frame_reading_when_stream_is_closed_before_sending_data() {
    let mut fr = FrameReaderTest::new();
    // Open the stream by sending a single byte from the server...
    fr.conn_s.stream_send(fr.stream_id, &[0x00]).unwrap();
    let out = fr.conn_s.process(None, now());
    mem::drop(fr.conn_c.process(out.dgram(), now()));
    // ...then have the client close its sending side without writing anything.
    assert_eq!(Ok(()), fr.conn_c.stream_close_send(fr.stream_id));
    let out = fr.conn_c.process(None, now());
    mem::drop(fr.conn_s.process(out.dgram(), now()));
    // The server-side reader should see a clean fin: no frame, stream done.
    assert_eq!(
        Ok((None, true)),
        fr.fr
            .receive::<HFrame>(&mut StreamReaderConnectionWrapper::new(
                &mut fr.conn_s,
                fr.stream_id
            ))
    );
}
// Test closing a stream before any frame is sent should not cause an error.
// This is the same as the previous just for WebTransportFrame.
#[test]
fn test_wt_frame_reading_when_stream_is_closed_before_sending_data() {
    let mut fr = FrameReaderTest::new();
    // Open the stream, then close the client's sending side without data.
    fr.conn_s.stream_send(fr.stream_id, &[0x00]).unwrap();
    let out = fr.conn_s.process(None, now());
    mem::drop(fr.conn_c.process(out.dgram(), now()));
    assert_eq!(Ok(()), fr.conn_c.stream_close_send(fr.stream_id));
    let out = fr.conn_c.process(None, now());
    mem::drop(fr.conn_s.process(out.dgram(), now()));
    // A bare fin yields no frame and reports the stream as done.
    assert_eq!(
        Ok((None, true)),
        fr.fr
            .receive::<WebTransportFrame>(&mut StreamReaderConnectionWrapper::new(
                &mut fr.conn_s,
                fr.stream_id
            ))
    );
}

View file

@ -0,0 +1,17 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::enc_dec_wtframe;
use crate::frames::WebTransportFrame;
#[test]
fn test_wt_close_session() {
    let f = WebTransportFrame::CloseSession {
        error: 5,
        message: "Hello".to_string(),
    };
    // Expected wire bytes: 0x6843 = varint for type 0x2843, 0x09 = length
    // (4-byte error + 5 message bytes), 0x00000005 = error, "Hello" in ASCII.
    enc_dec_wtframe(&f, "6843090000000548656c6c6f", 0);
}

View file

@ -0,0 +1,61 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use crate::{frames::reader::FrameDecoder, Error, Res};
use neqo_common::{Decoder, Encoder};
use std::convert::TryFrom;
// Wire frame-type values for WebTransport frames.
pub(crate) type WebTransportFrameType = u64;

// Frame type carrying a session close (error code + reason message).
const WT_FRAME_CLOSE_SESSION: WebTransportFrameType = 0x2843;
// Upper bound on the close reason's message bytes; longer frames are rejected.
const WT_FRAME_CLOSE_MAX_MESSAGE_SIZE: u64 = 1024;
/// Frames understood on a WebTransport session stream.
#[derive(PartialEq, Debug)]
pub enum WebTransportFrame {
    /// Session close: a 32-bit application error code and a UTF-8 reason string.
    CloseSession { error: u32, message: String },
}
impl WebTransportFrame {
    /// Serialize the frame into `enc`: varint type, varint payload length,
    /// then a 4-byte error code followed by the raw message bytes.
    pub fn encode(&self, enc: &mut Encoder) {
        enc.encode_varint(WT_FRAME_CLOSE_SESSION);
        // Single-variant enum, so this pattern is irrefutable.
        let WebTransportFrame::CloseSession { error, message } = &self;
        enc.encode_varint(4 + message.len() as u64);
        enc.encode_uint(4, *error);
        enc.encode(message.as_bytes());
    }
}
impl FrameDecoder<WebTransportFrame> for WebTransportFrame {
fn decode(
frame_type: u64,
frame_len: u64,
data: Option<&[u8]>,
) -> Res<Option<WebTransportFrame>> {
if let Some(payload) = data {
let mut dec = Decoder::from(payload);
if frame_type == WT_FRAME_CLOSE_SESSION {
if frame_len > WT_FRAME_CLOSE_MAX_MESSAGE_SIZE + 4 {
return Err(Error::HttpMessageError);
}
let error =
u32::try_from(dec.decode_uint(4).ok_or(Error::HttpMessageError)?).unwrap();
let message = match String::from_utf8(dec.decode_remainder().to_vec()) {
Ok(s) => s,
Err(_) => return Err(Error::HttpMessageError),
};
Ok(Some(WebTransportFrame::CloseSession { error, message }))
} else {
Ok(None)
}
} else {
Ok(None)
}
}
fn is_known_type(frame_type: u64) -> bool {
frame_type == WT_FRAME_CLOSE_SESSION
}
}

View file

@ -4,53 +4,83 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use crate::{Error, MessageType, Res};
use neqo_common::Header;
#![allow(clippy::expl_impl_clone_on_copy)] // see https://github.com/Lymia/enumset/issues/28
const PSEUDO_HEADER_STATUS: u8 = 0x1;
const PSEUDO_HEADER_METHOD: u8 = 0x2;
const PSEUDO_HEADER_SCHEME: u8 = 0x4;
const PSEUDO_HEADER_AUTHORITY: u8 = 0x8;
const PSEUDO_HEADER_PATH: u8 = 0x10;
const PSEUDO_HEADER_PROTOCOL: u8 = 0x20;
const REGULAR_HEADER: u8 = 0x80;
use crate::{Error, MessageType, Res};
use enumset::{enum_set, EnumSet, EnumSetType};
use neqo_common::Header;
use std::convert::TryFrom;
/// Which pseudo headers (and whether any regular header) have been seen while
/// validating a header block; used as bits of an `EnumSet`.
#[derive(EnumSetType, Debug)]
enum PseudoHeaderState {
    Status,
    Method,
    Scheme,
    Authority,
    Path,
    Protocol,
    // Marker for any non-pseudo (regular) header.
    Regular,
}
impl PseudoHeaderState {
    /// True for every variant except `Regular`, i.e. for actual pseudo headers.
    fn is_pseudo(self) -> bool {
        !matches!(self, Self::Regular)
    }
}
impl TryFrom<(MessageType, &str)> for PseudoHeaderState {
    type Error = Error;

    /// Map a pseudo-header name to its state bit. Which names are valid
    /// depends on the message type: only `:status` on responses, only the
    /// request pseudo headers on requests. Anything else is `InvalidHeader`.
    fn try_from(v: (MessageType, &str)) -> Res<Self> {
        match v {
            (MessageType::Response, ":status") => Ok(Self::Status),
            (MessageType::Request, ":method") => Ok(Self::Method),
            (MessageType::Request, ":scheme") => Ok(Self::Scheme),
            (MessageType::Request, ":authority") => Ok(Self::Authority),
            (MessageType::Request, ":path") => Ok(Self::Path),
            (MessageType::Request, ":protocol") => Ok(Self::Protocol),
            (_, _) => Err(Error::InvalidHeader),
        }
    }
}
/// Check whether the response is informational(1xx).
/// # Errors
/// Returns an error if response headers do not contain
/// a status header or if the value of the header cannot be parsed.
/// a status header or if the value of the header is 101 or cannot be parsed.
pub fn is_interim(headers: &[Header]) -> Res<bool> {
let status = headers.iter().take(1).find(|h| h.name() == ":status");
if let Some(h) = status {
#[allow(clippy::map_err_ignore)]
let status_code = h.value().parse::<i32>().map_err(|_| Error::InvalidHeader)?;
if status_code == 101 {
// https://datatracker.ietf.org/doc/html/draft-ietf-quic-http#section-4.3
Err(Error::InvalidHeader)
} else {
Ok((100..200).contains(&status_code))
}
} else {
Err(Error::InvalidHeader)
}
}
fn track_pseudo(name: &str, state: &mut u8, message_type: MessageType) -> Res<bool> {
let (pseudo, bit) = if name.starts_with(':') {
if *state & REGULAR_HEADER != 0 {
fn track_pseudo(
name: &str,
result_state: &mut EnumSet<PseudoHeaderState>,
message_type: MessageType,
) -> Res<bool> {
let new_state = if name.starts_with(':') {
if result_state.contains(PseudoHeaderState::Regular) {
return Err(Error::InvalidHeader);
}
let bit = match (message_type, name) {
(MessageType::Response, ":status") => PSEUDO_HEADER_STATUS,
(MessageType::Request, ":method") => PSEUDO_HEADER_METHOD,
(MessageType::Request, ":scheme") => PSEUDO_HEADER_SCHEME,
(MessageType::Request, ":authority") => PSEUDO_HEADER_AUTHORITY,
(MessageType::Request, ":path") => PSEUDO_HEADER_PATH,
(MessageType::Request, ":protocol") => PSEUDO_HEADER_PROTOCOL,
(_, _) => return Err(Error::InvalidHeader),
};
(true, bit)
PseudoHeaderState::try_from((message_type, name))?
} else {
(false, REGULAR_HEADER)
PseudoHeaderState::Regular
};
if *state & bit == 0 || !pseudo {
*state |= bit;
let pseudo = new_state.is_pseudo();
if *result_state & new_state == EnumSet::empty() || !pseudo {
*result_state |= new_state;
Ok(pseudo)
} else {
Err(Error::InvalidHeader)
@ -63,7 +93,7 @@ fn track_pseudo(name: &str, state: &mut u8, message_type: MessageType) -> Res<bo
/// Returns an error if headers are not well formed.
pub fn headers_valid(headers: &[Header], message_type: MessageType) -> Res<()> {
let mut method_value: Option<&str> = None;
let mut pseudo_state = 0;
let mut pseudo_state = EnumSet::new();
for header in headers {
let is_pseudo = track_pseudo(header.name(), &mut pseudo_state, message_type)?;
@ -80,20 +110,20 @@ pub fn headers_valid(headers: &[Header], message_type: MessageType) -> Res<()> {
}
}
// Clear the regular header bit, since we only check pseudo headers below.
pseudo_state &= !REGULAR_HEADER;
pseudo_state.remove(PseudoHeaderState::Regular);
let pseudo_header_mask = match message_type {
MessageType::Response => PSEUDO_HEADER_STATUS,
MessageType::Response => enum_set!(PseudoHeaderState::Status),
MessageType::Request => {
if method_value == Some(&"CONNECT".to_string()) {
PSEUDO_HEADER_METHOD | PSEUDO_HEADER_AUTHORITY
PseudoHeaderState::Method | PseudoHeaderState::Authority
} else {
PSEUDO_HEADER_METHOD | PSEUDO_HEADER_SCHEME | PSEUDO_HEADER_PATH
PseudoHeaderState::Method | PseudoHeaderState::Scheme | PseudoHeaderState::Path
}
}
};
if (MessageType::Request == message_type)
&& ((pseudo_state & PSEUDO_HEADER_PROTOCOL) > 0)
&& pseudo_state.contains(PseudoHeaderState::Protocol)
&& method_value != Some(&"CONNECT".to_string())
{
return Err(Error::InvalidHeader);

View file

@ -1,984 +0,0 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use crate::settings::HSettings;
use neqo_common::{
hex_with_len, qtrace, Decoder, Encoder, IncrementalDecoderBuffer, IncrementalDecoderIgnore,
IncrementalDecoderUint,
};
use neqo_crypto::random;
use neqo_transport::{Connection, StreamId};
use std::convert::TryFrom;
use std::io::Write;
use std::mem;
use crate::{Error, Priority, Res};
// Wire frame-type values for HTTP/3 frames.
pub(crate) type HFrameType = u64;

pub(crate) const H3_FRAME_TYPE_DATA: HFrameType = 0x0;
pub(crate) const H3_FRAME_TYPE_HEADERS: HFrameType = 0x1;
const H3_FRAME_TYPE_CANCEL_PUSH: HFrameType = 0x3;
pub(crate) const H3_FRAME_TYPE_SETTINGS: HFrameType = 0x4;
const H3_FRAME_TYPE_PUSH_PROMISE: HFrameType = 0x5;
const H3_FRAME_TYPE_GOAWAY: HFrameType = 0x7;
const H3_FRAME_TYPE_MAX_PUSH_ID: HFrameType = 0xd;
const H3_FRAME_TYPE_PRIORITY_UPDATE_REQUEST: HFrameType = 0xf0700;
const H3_FRAME_TYPE_PRIORITY_UPDATE_PUSH: HFrameType = 0xf0701;

// Frame types that must not appear in HTTP/3; receiving one is an error.
pub const H3_RESERVED_FRAME_TYPES: &[HFrameType] = &[0x2, 0x6, 0x8, 0x9];

// Cap on how many bytes are read from the stream in one go.
const MAX_READ_SIZE: usize = 4096;
// data for DATA frame is not read into HFrame::Data.
/// An HTTP/3 frame. Payloads (except DATA, see above) are fully decoded.
#[derive(PartialEq, Debug)]
pub enum HFrame {
    Data {
        len: u64, // length of the data
    },
    Headers {
        header_block: Vec<u8>,
    },
    CancelPush {
        push_id: u64,
    },
    Settings {
        settings: HSettings,
    },
    PushPromise {
        push_id: u64,
        header_block: Vec<u8>,
    },
    Goaway {
        stream_id: StreamId,
    },
    MaxPushId {
        push_id: u64,
    },
    // A greasing frame with a randomly chosen reserved type; send-only.
    Grease,
    PriorityUpdateRequest {
        element_id: u64,
        priority: Priority,
    },
    PriorityUpdatePush {
        element_id: u64,
        priority: Priority,
    },
}
impl HFrame {
    /// Return the on-the-wire type for this frame. For `Grease` a fresh
    /// random reserved type of the form `N * 0x1f + 0x21` is drawn each call.
    fn get_type(&self) -> HFrameType {
        match self {
            Self::Data { .. } => H3_FRAME_TYPE_DATA,
            Self::Headers { .. } => H3_FRAME_TYPE_HEADERS,
            Self::CancelPush { .. } => H3_FRAME_TYPE_CANCEL_PUSH,
            Self::Settings { .. } => H3_FRAME_TYPE_SETTINGS,
            Self::PushPromise { .. } => H3_FRAME_TYPE_PUSH_PROMISE,
            Self::Goaway { .. } => H3_FRAME_TYPE_GOAWAY,
            Self::MaxPushId { .. } => H3_FRAME_TYPE_MAX_PUSH_ID,
            Self::PriorityUpdateRequest { .. } => H3_FRAME_TYPE_PRIORITY_UPDATE_REQUEST,
            Self::PriorityUpdatePush { .. } => H3_FRAME_TYPE_PRIORITY_UPDATE_PUSH,
            Self::Grease => {
                let r = random(7);
                Decoder::from(&r).decode_uint(7).unwrap() * 0x1f + 0x21
            }
        }
    }

    /// Serialize the frame (varint type, varint length, payload) into `enc`.
    pub fn encode(&self, enc: &mut Encoder) {
        enc.encode_varint(self.get_type());
        match self {
            Self::Data { len } => {
                // DATA frame only encode the length here.
                enc.encode_varint(*len);
            }
            Self::Headers { header_block } => {
                enc.encode_vvec(header_block);
            }
            Self::CancelPush { push_id } => {
                enc.encode_vvec_with(|enc_inner| {
                    enc_inner.encode_varint(*push_id);
                });
            }
            Self::Settings { settings } => {
                settings.encode_frame_contents(enc);
            }
            Self::PushPromise {
                push_id,
                header_block,
            } => {
                // Length covers the push_id varint plus the header block.
                enc.encode_varint((header_block.len() + (Encoder::varint_len(*push_id))) as u64);
                enc.encode_varint(*push_id);
                enc.encode(header_block);
            }
            Self::Goaway { stream_id } => {
                enc.encode_vvec_with(|enc_inner| {
                    enc_inner.encode_varint(stream_id.as_u64());
                });
            }
            Self::MaxPushId { push_id } => {
                enc.encode_vvec_with(|enc_inner| {
                    enc_inner.encode_varint(*push_id);
                });
            }
            Self::Grease => {
                // Encode some number of random bytes.
                // r[0] picks a payload length of 1..=7; r[1..] supplies the bytes.
                let r = random(8);
                enc.encode_vvec(&r[1..usize::from(1 + (r[0] & 0x7))]);
            }
            Self::PriorityUpdateRequest {
                element_id,
                priority,
            }
            | Self::PriorityUpdatePush {
                element_id,
                priority,
            } => {
                // Payload is the element id followed by the textual priority
                // field value (e.g. "u=5,i"), rendered via Display.
                let mut update_frame = Encoder::new();
                update_frame.encode_varint(*element_id);
                let mut priority_enc: Vec<u8> = Vec::new();
                write!(priority_enc, "{}", priority).unwrap();
                update_frame.encode(&priority_enc);
                enc.encode_varint(update_frame.len() as u64);
                enc.encode(&update_frame);
            }
        }
    }
}
/// Incremental parsing state: type varint -> length varint -> either buffer
/// the payload (known frames) or silently discard it (unknown frames).
#[derive(Clone, Debug)]
enum HFrameReaderState {
    GetType { decoder: IncrementalDecoderUint },
    GetLength { decoder: IncrementalDecoderUint },
    GetData { decoder: IncrementalDecoderBuffer },
    UnknownFrameDischargeData { decoder: IncrementalDecoderIgnore },
}
/// Incremental HTTP/3 frame parser fed from a QUIC stream.
#[derive(Debug)]
pub struct HFrameReader {
    state: HFrameReaderState,
    // Type of the frame currently being parsed.
    hframe_type: u64,
    // Declared payload length of the current frame.
    hframe_len: u64,
    // Buffered payload bytes for frames that are decoded in full.
    payload: Vec<u8>,
}
impl Default for HFrameReader {
    // Equivalent to `new()`: start by reading a frame type.
    fn default() -> Self {
        Self::new()
    }
}
impl HFrameReader {
    /// Create a reader that starts by decoding a frame type.
    #[must_use]
    pub fn new() -> Self {
        Self {
            state: HFrameReaderState::GetType {
                decoder: IncrementalDecoderUint::default(),
            },
            hframe_type: 0,
            hframe_len: 0,
            payload: Vec::new(),
        }
    }

    /// Create a reader for a stream whose frame type is already known,
    /// starting directly at the length varint.
    #[must_use]
    pub fn new_with_type(hframe_type: u64) -> Self {
        Self {
            state: HFrameReaderState::GetLength {
                decoder: IncrementalDecoderUint::default(),
            },
            hframe_type,
            hframe_len: 0,
            payload: Vec::new(),
        }
    }

    // Return to the initial state, ready for the next frame's type.
    fn reset(&mut self) {
        self.state = HFrameReaderState::GetType {
            decoder: IncrementalDecoderUint::default(),
        };
    }

    // Minimum number of bytes the current decoder still needs.
    fn min_remaining(&self) -> usize {
        match &self.state {
            HFrameReaderState::GetType { decoder } | HFrameReaderState::GetLength { decoder } => {
                decoder.min_remaining()
            }
            HFrameReaderState::GetData { decoder } => decoder.min_remaining(),
            HFrameReaderState::UnknownFrameDischargeData { decoder } => decoder.min_remaining(),
        }
    }

    // True when a frame is partially parsed; a fin in this state is an error
    // unless we are still waiting for a fresh frame type.
    fn decoding_in_progress(&self) -> bool {
        if let HFrameReaderState::GetType { decoder } = &self.state {
            decoder.decoding_in_progress()
        } else {
            true
        }
    }

    /// returns true if quic stream was closed.
    /// # Errors
    /// May return `HttpFrame` if a frame cannot be decoded.
    /// and `TransportStreamDoesNotExist` if `stream_recv` fails.
    pub fn receive(
        &mut self,
        conn: &mut Connection,
        stream_id: StreamId,
    ) -> Res<(Option<HFrame>, bool)> {
        loop {
            // Read only as much as the current decoder needs, capped.
            let to_read = std::cmp::min(self.min_remaining(), MAX_READ_SIZE);
            let mut buf = vec![0; to_read];
            let (output, read, fin) = match conn
                .stream_recv(stream_id, &mut buf)
                .map_err(|e| Error::map_stream_recv_errors(&e))?
            {
                (0, f) => (None, false, f),
                (amount, f) => {
                    qtrace!(
                        [conn],
                        "HFrameReader::receive: reading {} byte, fin={}",
                        amount,
                        f
                    );
                    (self.consume(Decoder::from(&buf[..amount]))?, true, f)
                }
            };

            if output.is_some() {
                break Ok((output, fin));
            }

            if fin {
                // A fin mid-frame is a malformed frame.
                if self.decoding_in_progress() {
                    break Err(Error::HttpFrame);
                }
                break Ok((None, fin));
            }

            if !read {
                // There was no new data, exit the loop.
                break Ok((None, false));
            }
        }
    }

    /// # Errors
    /// May return `HttpFrame` if a frame cannot be decoded.
    fn consume(&mut self, mut input: Decoder) -> Res<Option<HFrame>> {
        match &mut self.state {
            HFrameReaderState::GetType { decoder } => {
                if let Some(v) = decoder.consume(&mut input) {
                    qtrace!("HFrameReader::receive: read frame type {}", v);
                    self.hframe_type = v;
                    // Reserved (HTTP/2-only) frame types are rejected outright.
                    if H3_RESERVED_FRAME_TYPES.contains(&self.hframe_type) {
                        return Err(Error::HttpFrameUnexpected);
                    }
                    self.state = HFrameReaderState::GetLength {
                        decoder: IncrementalDecoderUint::default(),
                    };
                }
            }

            HFrameReaderState::GetLength { decoder } => {
                if let Some(len) = decoder.consume(&mut input) {
                    qtrace!(
                        "HFrameReader::receive: frame type {} length {}",
                        self.hframe_type,
                        len
                    );
                    self.hframe_len = len;
                    self.state = match self.hframe_type {
                        // DATA payload are left on the quic stream and picked up separately
                        H3_FRAME_TYPE_DATA => {
                            return Ok(Some(self.get_frame()?));
                        }

                        // for other frames get all data before decoding.
                        H3_FRAME_TYPE_CANCEL_PUSH
                        | H3_FRAME_TYPE_SETTINGS
                        | H3_FRAME_TYPE_GOAWAY
                        | H3_FRAME_TYPE_MAX_PUSH_ID
                        | H3_FRAME_TYPE_PUSH_PROMISE
                        | H3_FRAME_TYPE_HEADERS
                        | H3_FRAME_TYPE_PRIORITY_UPDATE_REQUEST
                        | H3_FRAME_TYPE_PRIORITY_UPDATE_PUSH => {
                            if len == 0 {
                                return Ok(Some(self.get_frame()?));
                            }
                            HFrameReaderState::GetData {
                                decoder: IncrementalDecoderBuffer::new(
                                    usize::try_from(len).or(Err(Error::HttpFrame))?,
                                ),
                            }
                        }

                        // Unknown frames are skipped: discharge `len` bytes.
                        _ => {
                            if len == 0 {
                                HFrameReaderState::GetType {
                                    decoder: IncrementalDecoderUint::default(),
                                }
                            } else {
                                HFrameReaderState::UnknownFrameDischargeData {
                                    decoder: IncrementalDecoderIgnore::new(
                                        usize::try_from(len).or(Err(Error::HttpFrame))?,
                                    ),
                                }
                            }
                        }
                    };
                }
            }

            HFrameReaderState::GetData { decoder } => {
                if let Some(data) = decoder.consume(&mut input) {
                    qtrace!(
                        "received frame {}: {}",
                        self.hframe_type,
                        hex_with_len(&data[..])
                    );
                    self.payload = data;
                    return Ok(Some(self.get_frame()?));
                }
            }

            HFrameReaderState::UnknownFrameDischargeData { decoder } => {
                if decoder.consume(&mut input) {
                    self.reset();
                }
            }
        }
        Ok(None)
    }

    /// # Errors
    /// May return `HttpFrame` if a frame cannot be decoded.
    // Decode the buffered payload into an `HFrame` and reset for the next frame.
    fn get_frame(&mut self) -> Res<HFrame> {
        let payload = mem::take(&mut self.payload);
        let mut dec = Decoder::from(&payload[..]);
        let f = match self.hframe_type {
            H3_FRAME_TYPE_DATA => HFrame::Data {
                len: self.hframe_len,
            },
            H3_FRAME_TYPE_HEADERS => HFrame::Headers {
                header_block: dec.decode_remainder().to_vec(),
            },
            H3_FRAME_TYPE_CANCEL_PUSH => HFrame::CancelPush {
                push_id: dec.decode_varint().ok_or(Error::HttpFrame)?,
            },
            H3_FRAME_TYPE_SETTINGS => {
                let mut settings = HSettings::default();
                // Preserve HttpSettings errors; any other decode failure
                // is reported as a generic HttpFrame error.
                settings.decode_frame_contents(&mut dec).map_err(|e| {
                    if e == Error::HttpSettings {
                        e
                    } else {
                        Error::HttpFrame
                    }
                })?;
                HFrame::Settings { settings }
            }
            H3_FRAME_TYPE_PUSH_PROMISE => HFrame::PushPromise {
                push_id: dec.decode_varint().ok_or(Error::HttpFrame)?,
                header_block: dec.decode_remainder().to_vec(),
            },
            H3_FRAME_TYPE_GOAWAY => HFrame::Goaway {
                stream_id: StreamId::new(dec.decode_varint().ok_or(Error::HttpFrame)?),
            },
            H3_FRAME_TYPE_MAX_PUSH_ID => HFrame::MaxPushId {
                push_id: dec.decode_varint().ok_or(Error::HttpFrame)?,
            },
            H3_FRAME_TYPE_PRIORITY_UPDATE_REQUEST | H3_FRAME_TYPE_PRIORITY_UPDATE_PUSH => {
                let element_id = dec.decode_varint().ok_or(Error::HttpFrame)?;
                let priority = dec.decode_remainder();
                let priority = Priority::from_bytes(priority)?;
                if self.hframe_type == H3_FRAME_TYPE_PRIORITY_UPDATE_REQUEST {
                    HFrame::PriorityUpdateRequest {
                        element_id,
                        priority,
                    }
                } else {
                    HFrame::PriorityUpdatePush {
                        element_id,
                        priority,
                    }
                }
            }
            _ => panic!("We should not be calling this function with unknown frame type!"),
        };
        self.reset();
        Ok(f)
    }
}
#[cfg(test)]
mod tests {
use super::{Decoder, Encoder, Error, HFrame, HFrameReader, HSettings};
use crate::settings::{HSetting, HSettingType};
use crate::Priority;
use neqo_crypto::AuthenticationStatus;
use neqo_transport::{Connection, StreamId, StreamType};
use std::mem;
use test_fixture::{connect, default_client, default_server, fixture_init, now};
// Encode `f`, check the bytes against the expected hex string `st`, then
// send the bytes over a real connection and check the reader returns `f`,
// leaving `remaining` payload bytes on the stream (non-zero only for DATA).
#[allow(clippy::many_single_char_names)]
fn enc_dec(f: &HFrame, st: &str, remaining: usize) {
    let mut d = Encoder::default();
    f.encode(&mut d);

    // For data, headers and push_promise we do not read all bytes from the buffer
    let d2 = Encoder::from_hex(st);
    assert_eq!(&d[..], &d2[..d.len()]);

    // Perform the full client/server handshake.
    let mut conn_c = default_client();
    let mut conn_s = default_server();
    let out = conn_c.process(None, now());
    let out = conn_s.process(out.dgram(), now());
    let out = conn_c.process(out.dgram(), now());
    mem::drop(conn_s.process(out.dgram(), now()));
    conn_c.authenticated(AuthenticationStatus::Ok, now());
    let out = conn_c.process(None, now());
    mem::drop(conn_s.process(out.dgram(), now()));

    // create a stream
    let stream_id = conn_s.stream_create(StreamType::BiDi).unwrap();

    let mut fr: HFrameReader = HFrameReader::new();

    // convert string into u8 vector
    let buf = Encoder::from_hex(st);
    conn_s.stream_send(stream_id, &buf[..]).unwrap();
    let out = conn_s.process(None, now());
    mem::drop(conn_c.process(out.dgram(), now()));

    let (frame, fin) = fr.receive(&mut conn_c, stream_id).unwrap();
    assert!(!fin);
    assert!(frame.is_some());
    assert_eq!(*f, frame.unwrap());

    // Check remaining data.
    let mut buf = [0_u8; 100];
    let (amount, _) = conn_c.stream_recv(stream_id, &mut buf).unwrap();
    assert_eq!(amount, remaining);
}
#[test]
fn test_data_frame() {
    // Three payload bytes stay on the stream (remaining = 3).
    let f = HFrame::Data { len: 3 };
    enc_dec(&f, "0003010203", 3);
}

#[test]
fn test_headers_frame() {
    let f = HFrame::Headers {
        header_block: vec![0x01, 0x02, 0x03],
    };
    enc_dec(&f, "0103010203", 0);
}

#[test]
fn test_cancel_push_frame4() {
    let f = HFrame::CancelPush { push_id: 5 };
    enc_dec(&f, "030105", 0);
}

#[test]
fn test_settings_frame4() {
    let f = HFrame::Settings {
        settings: HSettings::new(&[HSetting::new(HSettingType::MaxHeaderListSize, 4)]),
    };
    enc_dec(&f, "04020604", 0);
}

#[test]
fn test_push_promise_frame4() {
    let f = HFrame::PushPromise {
        push_id: 4,
        header_block: vec![0x61, 0x62, 0x63, 0x64],
    };
    enc_dec(&f, "05050461626364", 0);
}

#[test]
fn test_goaway_frame4() {
    let f = HFrame::Goaway {
        stream_id: StreamId::new(5),
    };
    enc_dec(&f, "070105", 0);
}

#[test]
fn test_max_push_id_frame4() {
    let f = HFrame::MaxPushId { push_id: 5 };
    enc_dec(&f, "0d0105", 0);
}
#[test]
fn grease() {
    // Encode a Grease frame and check its type is a reserved value
    // (0x1f * N + 0x21) with a payload of at most 7 bytes.
    fn make_grease() -> u64 {
        let mut enc = Encoder::default();
        HFrame::Grease.encode(&mut enc);
        let mut dec = Decoder::from(&enc);
        let ft = dec.decode_varint().unwrap();
        assert_eq!((ft - 0x21) % 0x1f, 0);
        let body = dec.decode_vvec().unwrap();
        assert!(body.len() <= 7);
        ft
    }

    fixture_init();
    // Two consecutive grease frames should pick different random types.
    let t1 = make_grease();
    let t2 = make_grease();
    assert_ne!(t1, t2);
}
#[test]
fn test_priority_update_request_default() {
    // Default priority serializes to an empty field value.
    let f = HFrame::PriorityUpdateRequest {
        element_id: 6,
        priority: Priority::default(),
    };
    enc_dec(&f, "800f07000106", 0);
}

#[test]
fn test_priority_update_request_incremental_default() {
    let f = HFrame::PriorityUpdateRequest {
        element_id: 7,
        priority: Priority::new(6, false),
    };
    enc_dec(&f, "800f07000407753d36", 0); // "u=6"
}

#[test]
fn test_priority_update_request_urgency_default() {
    let f = HFrame::PriorityUpdateRequest {
        element_id: 8,
        priority: Priority::new(3, true),
    };
    enc_dec(&f, "800f0700020869", 0); // "i"
}

#[test]
fn test_priority_update_request() {
    let f = HFrame::PriorityUpdateRequest {
        element_id: 9,
        priority: Priority::new(5, true),
    };
    enc_dec(&f, "800f07000609753d352c69", 0); // "u=5,i"
}

#[test]
fn test_priority_update_push_default() {
    let f = HFrame::PriorityUpdatePush {
        element_id: 10,
        priority: Priority::default(),
    };
    enc_dec(&f, "800f0701010a", 0);
}
// Test fixture: a connected client/server pair, a bidirectional stream
// created by the server, and a reader that parses on the client side.
struct HFrameReaderTest {
    pub fr: HFrameReader,
    pub conn_c: Connection,
    pub conn_s: Connection,
    pub stream_id: StreamId,
}

impl HFrameReaderTest {
    pub fn new() -> Self {
        let (conn_c, mut conn_s) = connect();
        let stream_id = conn_s.stream_create(StreamType::BiDi).unwrap();
        Self {
            fr: HFrameReader::new(),
            conn_c,
            conn_s,
            stream_id,
        }
    }

    // Send `v` from the server, deliver it to the client, and run the reader;
    // asserts the stream is not finished and returns any completed frame.
    fn process(&mut self, v: &[u8]) -> Option<HFrame> {
        self.conn_s.stream_send(self.stream_id, v).unwrap();
        let out = self.conn_s.process(None, now());
        mem::drop(self.conn_c.process(out.dgram(), now()));
        let (frame, fin) = self.fr.receive(&mut self.conn_c, self.stream_id).unwrap();
        assert!(!fin);
        frame
    }
}
// Test receiving byte by byte for a SETTINGS frame.
#[test]
fn test_frame_reading_with_stream_settings1() {
    let mut fr = HFrameReaderTest::new();

    // Send and read settings frame 040406040804
    assert!(fr.process(&[0x4]).is_none());
    assert!(fr.process(&[0x4]).is_none());
    assert!(fr.process(&[0x6]).is_none());
    assert!(fr.process(&[0x4]).is_none());
    assert!(fr.process(&[0x8]).is_none());
    let frame = fr.process(&[0x4]);
    assert!(frame.is_some());
    if let HFrame::Settings { settings } = frame.unwrap() {
        assert!(settings.len() == 1);
        assert!(settings[0] == HSetting::new(HSettingType::MaxHeaderListSize, 4));
    } else {
        panic!("wrong frame type");
    }
}

// Test receiving byte by byte for a SETTINGS frame with larger varints
#[test]
fn test_frame_reading_with_stream_settings2() {
    let mut fr = HFrameReaderTest::new();

    // Read settings frame 400406064004084100
    for i in &[0x40, 0x04, 0x06, 0x06, 0x40, 0x04, 0x08, 0x41] {
        assert!(fr.process(&[*i]).is_none());
    }
    let frame = fr.process(&[0x0]);
    assert!(frame.is_some());
    if let HFrame::Settings { settings } = frame.unwrap() {
        assert!(settings.len() == 1);
        assert!(settings[0] == HSetting::new(HSettingType::MaxHeaderListSize, 4));
    } else {
        panic!("wrong frame type");
    }
}

// Test receiving byte by byte for a PUSH_PROMISE frame.
#[test]
fn test_frame_reading_with_stream_push_promise() {
    let mut fr = HFrameReaderTest::new();

    // Read push-promise frame 05054101010203
    for i in &[0x05, 0x05, 0x41, 0x01, 0x01, 0x02] {
        assert!(fr.process(&[*i]).is_none());
    }
    let frame = fr.process(&[0x3]);
    assert!(frame.is_some());
    if let HFrame::PushPromise {
        push_id,
        header_block,
    } = frame.unwrap()
    {
        assert_eq!(push_id, 257);
        assert_eq!(header_block, &[0x1, 0x2, 0x3]);
    } else {
        panic!("wrong frame type");
    }
}

// Test DATA
#[test]
fn test_frame_reading_with_stream_data() {
    let mut fr = HFrameReaderTest::new();

    // Read data frame 0003010203
    let frame = fr.process(&[0x0, 0x3, 0x1, 0x2, 0x3]).unwrap();
    assert!(matches!(frame, HFrame::Data { len } if len == 3));

    // payload is still on the stream.
    // assert that we have 3 bytes in the stream
    let mut buf = [0_u8; 100];
    let (amount, _) = fr.conn_c.stream_recv(fr.stream_id, &mut buf).unwrap();
    assert_eq!(amount, 3);
}
// Test an unknown frame
#[test]
fn test_unknown_frame() {
    // Construct an unknown frame.
    const UNKNOWN_FRAME_LEN: usize = 832;

    let mut fr = HFrameReaderTest::new();

    let mut enc = Encoder::with_capacity(UNKNOWN_FRAME_LEN + 4);
    enc.encode_varint(1028_u64); // Arbitrary type.
    enc.encode_varint(UNKNOWN_FRAME_LEN as u64);
    let mut buf: Vec<_> = enc.into();
    buf.resize(UNKNOWN_FRAME_LEN + buf.len(), 0);
    assert!(fr.process(&buf).is_none());

    // now receive a CANCEL_PUSH frame to see that frame reader is ok.
    let frame = fr.process(&[0x03, 0x01, 0x05]);
    assert!(frame.is_some());
    if let HFrame::CancelPush { push_id } = frame.unwrap() {
        assert!(push_id == 5);
    } else {
        panic!("wrong frame type");
    }
}

// How the test data is delivered relative to the stream fin.
enum FrameReadingTestSend {
    OnlyData,
    DataWithFin,
    DataThenFin,
}

// Expected outcome of a single `receive` call in `test_reading_frame`.
enum FrameReadingTestExpect {
    Error,
    Incomplete,
    FrameComplete,
    FrameAndStreamComplete,
    StreamDoneWithoutFrame,
}
// Send `buf` (optionally with a fin, either in the same packet or a later
// one), run the reader once, and check the outcome against `expected_result`.
fn test_reading_frame(
    buf: &[u8],
    test_to_send: &FrameReadingTestSend,
    expected_result: &FrameReadingTestExpect,
) {
    let mut fr = HFrameReaderTest::new();

    fr.conn_s.stream_send(fr.stream_id, buf).unwrap();
    if let FrameReadingTestSend::DataWithFin = test_to_send {
        fr.conn_s.stream_close_send(fr.stream_id).unwrap();
    }

    let out = fr.conn_s.process(None, now());
    mem::drop(fr.conn_c.process(out.dgram(), now()));

    if let FrameReadingTestSend::DataThenFin = test_to_send {
        fr.conn_s.stream_close_send(fr.stream_id).unwrap();
        let out = fr.conn_s.process(None, now());
        mem::drop(fr.conn_c.process(out.dgram(), now()));
    }

    let rv = fr.fr.receive(&mut fr.conn_c, fr.stream_id);

    match expected_result {
        FrameReadingTestExpect::Error => assert_eq!(Err(Error::HttpFrame), rv),
        FrameReadingTestExpect::Incomplete => {
            assert_eq!(Ok((None, false)), rv);
        }
        FrameReadingTestExpect::FrameComplete => {
            let (f, fin) = rv.unwrap();
            assert!(!fin);
            assert!(f.is_some());
        }
        FrameReadingTestExpect::FrameAndStreamComplete => {
            let (f, fin) = rv.unwrap();
            assert!(fin);
            assert!(f.is_some());
        }
        FrameReadingTestExpect::StreamDoneWithoutFrame => {
            let (f, fin) = rv.unwrap();
            assert!(fin);
            assert!(f.is_none());
        }
    };
}
#[test]
fn test_complete_and_incomplete_unknown_frame() {
    // Construct an unknown frame.
    const UNKNOWN_FRAME_LEN: usize = 832;
    let mut enc = Encoder::with_capacity(UNKNOWN_FRAME_LEN + 4);
    enc.encode_varint(1028_u64); // Arbitrary type.
    enc.encode_varint(UNKNOWN_FRAME_LEN as u64);
    let mut buf: Vec<_> = enc.into();
    buf.resize(UNKNOWN_FRAME_LEN + buf.len(), 0);

    // Truncated prefixes: incomplete without fin; error if fin arrives
    // while the frame is only partially read.
    let len = std::cmp::min(buf.len() - 1, 10);
    for i in 1..len {
        test_reading_frame(
            &buf[..i],
            &FrameReadingTestSend::OnlyData,
            &FrameReadingTestExpect::Incomplete,
        );
        test_reading_frame(
            &buf[..i],
            &FrameReadingTestSend::DataWithFin,
            &FrameReadingTestExpect::Error,
        );
        test_reading_frame(
            &buf[..i],
            &FrameReadingTestSend::DataThenFin,
            &FrameReadingTestExpect::Error,
        );
    }

    // The complete unknown frame never yields a frame; with a fin the
    // stream simply ends cleanly.
    test_reading_frame(
        &buf,
        &FrameReadingTestSend::OnlyData,
        &FrameReadingTestExpect::Incomplete,
    );
    test_reading_frame(
        &buf,
        &FrameReadingTestSend::DataWithFin,
        &FrameReadingTestExpect::StreamDoneWithoutFrame,
    );
    test_reading_frame(
        &buf,
        &FrameReadingTestSend::DataThenFin,
        &FrameReadingTestExpect::StreamDoneWithoutFrame,
    );
}
// if we read more than done_state bytes HFrameReader will be in done state.
fn test_complete_and_incomplete_frame(buf: &[u8], done_state: usize) {
use std::cmp::Ordering;
// Let's consume partial frames. It is enough to test partal frames
// up to 10 byte. 10 byte is greater than frame type and frame
// length and bit of data.
let len = std::cmp::min(buf.len() - 1, 10);
for i in 1..len {
test_reading_frame(
&buf[..i],
&FrameReadingTestSend::OnlyData,
if i >= done_state {
&FrameReadingTestExpect::FrameComplete
} else {
&FrameReadingTestExpect::Incomplete
},
);
test_reading_frame(
&buf[..i],
&FrameReadingTestSend::DataWithFin,
match i.cmp(&done_state) {
Ordering::Greater => &FrameReadingTestExpect::FrameComplete,
Ordering::Equal => &FrameReadingTestExpect::FrameAndStreamComplete,
Ordering::Less => &FrameReadingTestExpect::Error,
},
);
test_reading_frame(
&buf[..i],
&FrameReadingTestSend::DataThenFin,
match i.cmp(&done_state) {
Ordering::Greater => &FrameReadingTestExpect::FrameComplete,
Ordering::Equal => &FrameReadingTestExpect::FrameAndStreamComplete,
Ordering::Less => &FrameReadingTestExpect::Error,
},
);
}
test_reading_frame(
buf,
&FrameReadingTestSend::OnlyData,
&FrameReadingTestExpect::FrameComplete,
);
test_reading_frame(
buf,
&FrameReadingTestSend::DataWithFin,
if buf.len() == done_state {
&FrameReadingTestExpect::FrameAndStreamComplete
} else {
&FrameReadingTestExpect::FrameComplete
},
);
test_reading_frame(
buf,
&FrameReadingTestSend::DataThenFin,
if buf.len() == done_state {
&FrameReadingTestExpect::FrameAndStreamComplete
} else {
&FrameReadingTestExpect::FrameComplete
},
);
}
#[test]
fn test_complete_and_incomplete_frames() {
    // Exercises every HTTP/3 frame type against partial and complete
    // delivery via `test_complete_and_incomplete_frame`, passing the byte
    // offset at which the frame reader reaches its done state.
    const FRAME_LEN: usize = 10;
    const HEADER_BLOCK: &[u8] = &[0x01, 0x02, 0x03, 0x04];

    // Encode a single frame into a fresh buffer (shared by every case below).
    let encode = |f: &HFrame| -> Vec<u8> {
        let mut enc = Encoder::default();
        f.encode(&mut enc);
        enc.into()
    };

    // H3_FRAME_TYPE_DATA len=0: the reader is done after the 2-byte header.
    let buf = encode(&HFrame::Data { len: 0 });
    test_complete_and_incomplete_frame(&buf, 2);

    // H3_FRAME_TYPE_DATA len=FRAME_LEN: still done after the 2-byte header;
    // the DATA payload itself is read by the application, not the reader.
    let mut buf = encode(&HFrame::Data {
        len: FRAME_LEN as u64,
    });
    buf.resize(FRAME_LEN + buf.len(), 0);
    test_complete_and_incomplete_frame(&buf, 2);

    // H3_FRAME_TYPE_HEADERS with an empty header block: done after the header.
    let buf = encode(&HFrame::Headers {
        header_block: Vec::new(),
    });
    test_complete_and_incomplete_frame(&buf, 2);

    // Frames whose payload belongs to the frame itself are only done once
    // the entire encoding has been consumed (done_state == buf.len()).
    let whole_buffer_frames = [
        // H3_FRAME_TYPE_HEADERS
        HFrame::Headers {
            header_block: HEADER_BLOCK.to_vec(),
        },
        // H3_FRAME_TYPE_CANCEL_PUSH
        HFrame::CancelPush { push_id: 5 },
        // H3_FRAME_TYPE_SETTINGS
        HFrame::Settings {
            settings: HSettings::new(&[HSetting::new(HSettingType::MaxHeaderListSize, 4)]),
        },
        // H3_FRAME_TYPE_PUSH_PROMISE
        HFrame::PushPromise {
            push_id: 4,
            header_block: HEADER_BLOCK.to_vec(),
        },
        // H3_FRAME_TYPE_GOAWAY
        HFrame::Goaway {
            stream_id: StreamId::new(5),
        },
        // H3_FRAME_TYPE_MAX_PUSH_ID
        HFrame::MaxPushId { push_id: 5 },
    ];
    for f in &whole_buffer_frames {
        let buf = encode(f);
        test_complete_and_incomplete_frame(&buf, buf.len());
    }
}
// Test closing a stream before any frame is sent should not cause an error.
#[test]
fn test_frame_reading_when_stream_is_closed_before_sending_data() {
    let mut test = HFrameReaderTest::new();

    // The server puts a single byte on the stream and flushes it to the client.
    test.conn_s.stream_send(test.stream_id, &[0x00]).unwrap();
    let flight = test.conn_s.process(None, now());
    mem::drop(test.conn_c.process(flight.dgram(), now()));

    // The client closes its sending side before any frame was sent on it and
    // flushes the close back to the server.
    assert_eq!(Ok(()), test.conn_c.stream_close_send(test.stream_id));
    let flight = test.conn_c.process(None, now());
    mem::drop(test.conn_s.process(flight.dgram(), now()));

    // The reader reports a finished stream with no frame — and no error.
    assert_eq!(
        Ok((None, true)),
        test.fr.receive(&mut test.conn_s, test.stream_id)
    );
}
}

View file

@ -16,8 +16,8 @@ mod connection_server;
mod control_stream_local;
mod control_stream_remote;
pub mod features;
pub mod frames;
mod headers_checks;
pub mod hframe;
mod priority;
mod push_controller;
mod qlog;
@ -29,7 +29,7 @@ mod send_message;
pub mod server;
mod server_connection_events;
mod server_events;
mod settings;
pub mod settings;
mod stream_type_reader;
use neqo_qpack::Error as QpackError;
@ -43,7 +43,8 @@ pub use client_events::{Http3ClientEvent, WebTransportEvent};
pub use conn_params::Http3Parameters;
pub use connection::Http3State;
pub use connection_client::Http3Client;
pub use hframe::{HFrame, HFrameReader};
use features::extended_connect::WebTransportSession;
pub use frames::HFrame;
pub use neqo_common::{Header, MessageType};
pub use priority::Priority;
pub use server::Http3Server;
@ -51,6 +52,9 @@ pub use server_events::{
Http3OrWebTransportStream, Http3ServerEvent, WebTransportRequest, WebTransportServerEvent,
};
pub use settings::HttpZeroRttChecker;
use std::any::Any;
use std::cell::RefCell;
use std::rc::Rc;
pub use stream_type_reader::NewStreamType;
type Res<T> = Result<T, Error>;
@ -81,6 +85,7 @@ pub enum Error {
HttpRequestIncomplete,
HttpConnect,
HttpVersionFallback,
HttpMessageError,
QpackError(neqo_qpack::Error),
// Internal errors from here.
@ -125,6 +130,7 @@ impl Error {
Self::HttpRequestRejected => 0x10b,
Self::HttpRequestCancelled => 0x10c,
Self::HttpRequestIncomplete => 0x10d,
Self::HttpMessageError => 0x10e,
Self::HttpConnect => 0x10f,
Self::HttpVersionFallback => 0x110,
Self::QpackError(e) => e.code(),
@ -191,15 +197,15 @@ impl Error {
/// # Panics
/// On unexpected errors, in debug mode.
#[must_use]
pub fn map_stream_recv_errors(err: &TransportError) -> Self {
pub fn map_stream_recv_errors(err: &Error) -> Self {
match err {
TransportError::NoMoreData => {
Self::TransportError(TransportError::NoMoreData) => {
debug_assert!(
false,
"Do not call stream_recv if FIN has been previously read"
);
}
TransportError::InvalidStreamId => {}
Self::TransportError(TransportError::InvalidStreamId) => {}
_ => {
debug_assert!(false, "Unexpected error");
}
@ -340,6 +346,10 @@ pub trait RecvStream: Stream {
fn http_stream(&mut self) -> Option<&mut dyn HttpRecvStream> {
None
}
fn webtransport(&self) -> Option<Rc<RefCell<WebTransportSession>>> {
None
}
}
pub trait HttpRecvStream: RecvStream {
@ -350,12 +360,16 @@ pub trait HttpRecvStream: RecvStream {
/// An error may happen while reading a stream, e.g. early close, protocol error, etc.
fn header_unblocked(&mut self, conn: &mut Connection) -> Res<(ReceiveOutput, bool)>;
fn priority_handler_mut(&mut self) -> &mut PriorityHandler;
fn maybe_update_priority(&mut self, priority: Priority) -> bool;
fn priority_update_frame(&mut self) -> Option<HFrame>;
fn priority_update_sent(&mut self);
fn set_new_listener(&mut self, _conn_events: Box<dyn HttpRecvStreamEvents>) {}
fn extended_connect_wait_for_response(&self) -> bool {
false
}
fn any(&self) -> &dyn Any;
}
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
@ -394,7 +408,7 @@ impl Http3StreamInfo {
}
pub trait RecvStreamEvents: Debug {
fn data_readable(&self, stream_info: Http3StreamInfo);
fn data_readable(&self, _stream_info: Http3StreamInfo) {}
fn recv_closed(&self, _stream_info: Http3StreamInfo, _close_type: CloseType) {}
}
@ -419,15 +433,32 @@ pub trait SendStream: Stream {
/// # Errors
/// Error my occure during sending data, e.g. protocol error, etc.
fn send_data(&mut self, _conn: &mut Connection, _buf: &[u8]) -> Res<usize>;
/// # Errors
/// It may happen that the transport stream is already close. This is unlikely.
fn close(&mut self, conn: &mut Connection) -> Res<()>;
/// # Errors
/// It may happen that the transport stream is already close. This is unlikely.
fn close_with_message(
&mut self,
_conn: &mut Connection,
_error: u32,
_message: &str,
) -> Res<()> {
Err(Error::InvalidStreamId)
}
/// This function is called when sending side is closed abruptly by the peer or
/// the application.
fn handle_stop_sending(&mut self, close_type: CloseType);
fn http_stream(&mut self) -> Option<&mut dyn HttpSendStream> {
None
}
/// # Errors
/// It may happen that the transport stream is already close. This is unlikely.
fn send_data_atomic(&mut self, _conn: &mut Connection, _buf: &[u8]) -> Res<()> {
Err(Error::InvalidStreamId)
}
}
pub trait HttpSendStream: SendStream {
@ -438,6 +469,7 @@ pub trait HttpSendStream: SendStream {
/// This can also return an error if the underlying stream is closed.
fn send_headers(&mut self, headers: &[Header], conn: &mut Connection) -> Res<()>;
fn set_new_listener(&mut self, _conn_events: Box<dyn SendStreamEvents>) {}
fn any(&self) -> &dyn Any;
}
pub trait SendStreamEvents: Debug {

View file

@ -1,4 +1,4 @@
use crate::{Error, HFrame, Header, Res};
use crate::{frames::HFrame, Error, Header, Res};
use neqo_transport::StreamId;
use sfv::{BareItem, Item, ListEntry, Parser};
use std::convert::TryFrom;
@ -111,9 +111,9 @@ impl PriorityHandler {
}
}
pub fn priority(&self) -> Priority {
/*pub fn priority(&self) -> Priority {
self.priority
}
}*/
/// Returns if an priority update will be issued
pub fn maybe_update_priority(&mut self, priority: Priority) -> bool {

View file

@ -5,7 +5,7 @@
use crate::client_events::{Http3ClientEvent, Http3ClientEvents};
use crate::connection::Http3Connection;
use crate::hframe::HFrame;
use crate::frames::HFrame;
use crate::{CloseType, Error, Http3StreamInfo, HttpRecvStreamEvents, RecvStreamEvents, Res};
use neqo_common::{qerror, qinfo, qtrace, Header};
use neqo_transport::{Connection, StreamId};

View file

@ -4,17 +4,18 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use crate::hframe::{HFrame, HFrameReader, H3_FRAME_TYPE_HEADERS};
use crate::frames::{FrameReader, HFrame, StreamReaderConnectionWrapper, H3_FRAME_TYPE_HEADERS};
use crate::push_controller::PushController;
use crate::{
headers_checks::{headers_valid, is_interim},
priority::PriorityHandler,
qlog, CloseType, Error, Http3StreamInfo, Http3StreamType, HttpRecvStream, HttpRecvStreamEvents,
MessageType, ReceiveOutput, RecvStream, Res, Stream,
MessageType, Priority, ReceiveOutput, RecvStream, Res, Stream,
};
use neqo_common::{qdebug, qinfo, qtrace, Header};
use neqo_qpack::decoder::QPackDecoder;
use neqo_transport::{Connection, StreamId};
use std::any::Any;
use std::cell::RefCell;
use std::cmp::min;
use std::collections::VecDeque;
@ -52,11 +53,11 @@ pub struct RecvMessageInfo {
*/
#[derive(Debug)]
enum RecvMessageState {
WaitingForResponseHeaders { frame_reader: HFrameReader },
WaitingForResponseHeaders { frame_reader: FrameReader },
DecodingHeaders { header_block: Vec<u8>, fin: bool },
WaitingForData { frame_reader: HFrameReader },
WaitingForData { frame_reader: FrameReader },
ReadingData { remaining_data_len: usize },
WaitingForFinAfterTrailers { frame_reader: HFrameReader },
WaitingForFinAfterTrailers { frame_reader: FrameReader },
ClosePending, // Close must first be read by application
Closed,
ExtendedConnect,
@ -98,9 +99,9 @@ impl RecvMessage {
Self {
state: RecvMessageState::WaitingForResponseHeaders {
frame_reader: if message_info.header_frame_type_read {
HFrameReader::new_with_type(H3_FRAME_TYPE_HEADERS)
FrameReader::new_with_type(H3_FRAME_TYPE_HEADERS)
} else {
HFrameReader::new()
FrameReader::new()
},
},
message_type: message_info.message_type,
@ -124,7 +125,7 @@ impl RecvMessage {
}
RecvMessageState::WaitingForData { ..} => {
// TODO implement trailers, for now just ignore them.
self.state = RecvMessageState::WaitingForFinAfterTrailers{frame_reader: HFrameReader::new()};
self.state = RecvMessageState::WaitingForFinAfterTrailers{frame_reader: FrameReader::new()};
}
RecvMessageState::WaitingForFinAfterTrailers {..} => {
return Err(Error::HttpFrameUnexpected);
@ -188,14 +189,15 @@ impl RecvMessage {
self.set_closed();
} else {
self.state = if is_web_transport {
self.stream_type = Http3StreamType::ExtendedConnect;
RecvMessageState::ExtendedConnect
} else if interim {
RecvMessageState::WaitingForResponseHeaders {
frame_reader: HFrameReader::new(),
frame_reader: FrameReader::new(),
}
} else {
RecvMessageState::WaitingForData {
frame_reader: HFrameReader::new(),
frame_reader: FrameReader::new(),
}
};
}
@ -264,7 +266,10 @@ impl RecvMessage {
RecvMessageState::WaitingForResponseHeaders { frame_reader }
| RecvMessageState::WaitingForData { frame_reader }
| RecvMessageState::WaitingForFinAfterTrailers { frame_reader } => {
match frame_reader.receive(conn, self.stream_id)? {
match frame_reader.receive(&mut StreamReaderConnectionWrapper::new(
conn,
self.stream_id,
))? {
(None, true) => {
break self.set_state_to_close_pending(post_readable_event);
}
@ -410,7 +415,7 @@ impl RecvStream for RecvMessage {
let to_read = min(*remaining_data_len, buf.len() - written);
let (amount, fin) = conn
.stream_recv(self.stream_id, &mut buf[written..written + to_read])
.map_err(|e| Error::map_stream_recv_errors(&e))?;
.map_err(|e| Error::map_stream_recv_errors(&Error::from(e)))?;
qlog::h3_data_moved_up(conn.qlog_mut(), self.stream_id, amount);
debug_assert!(amount <= to_read);
@ -425,7 +430,7 @@ impl RecvStream for RecvMessage {
break Ok((written, fin));
} else if *remaining_data_len == 0 {
self.state = RecvMessageState::WaitingForData {
frame_reader: HFrameReader::new(),
frame_reader: FrameReader::new(),
};
self.receive_internal(conn, false)?;
} else {
@ -468,15 +473,30 @@ impl HttpRecvStream for RecvMessage {
self.receive(conn)
}
fn priority_handler_mut(&mut self) -> &mut PriorityHandler {
&mut self.priority_handler
fn maybe_update_priority(&mut self, priority: Priority) -> bool {
self.priority_handler.maybe_update_priority(priority)
}
fn priority_update_frame(&mut self) -> Option<HFrame> {
self.priority_handler.maybe_encode_frame(self.stream_id)
}
fn priority_update_sent(&mut self) {
self.priority_handler.priority_update_sent();
}
fn set_new_listener(&mut self, conn_events: Box<dyn HttpRecvStreamEvents>) {
self.state = RecvMessageState::WaitingForData {
frame_reader: FrameReader::new(),
};
self.conn_events = conn_events;
}
fn extended_connect_wait_for_response(&self) -> bool {
matches!(self.state, RecvMessageState::ExtendedConnect)
}
fn any(&self) -> &dyn Any {
self
}
}

View file

@ -4,7 +4,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use crate::hframe::HFrame;
use crate::frames::HFrame;
use crate::{
headers_checks::{headers_valid, is_interim, trailers_valid},
qlog, BufferedStream, CloseType, Error, Http3StreamInfo, Http3StreamType, HttpSendStream, Res,
@ -14,9 +14,11 @@ use crate::{
use neqo_common::{qdebug, qinfo, qtrace, Encoder, Header, MessageType};
use neqo_qpack::encoder::QPackEncoder;
use neqo_transport::{Connection, StreamId};
use std::any::Any;
use std::cell::RefCell;
use std::cmp::min;
use std::fmt::Debug;
use std::mem;
use std::rc::Rc;
const MAX_DATA_HEADER_SIZE_2: usize = (1 << 6) - 1; // Maximal amount of data with DATA frame header size 2
@ -217,7 +219,7 @@ impl SendStream for SendMessage {
.send_atomic(conn, &buf[..to_send])
.map_err(|e| Error::map_stream_send_errors(&e))?;
debug_assert!(sent);
qlog::h3_data_moved_down(&mut conn.qlog_mut(), self.stream_id(), to_send);
qlog::h3_data_moved_down(conn.qlog_mut(), self.stream_id(), to_send);
Ok(to_send)
}
@ -242,7 +244,7 @@ impl SendStream for SendMessage {
/// has not been called when needed, and HTTP3 layer has not picked up the info that the stream has been closed.)
fn send(&mut self, conn: &mut Connection) -> Res<()> {
let sent = Error::map_error(self.stream.send_buffer(conn), Error::HttpInternal(5))?;
qlog::h3_data_moved_down(&mut conn.qlog_mut(), self.stream_id(), sent);
qlog::h3_data_moved_down(conn.qlog_mut(), self.stream_id(), sent);
qtrace!([self], "{} bytes sent", sent);
if !self.stream.has_buffered_data() {
@ -290,6 +292,19 @@ impl SendStream for SendMessage {
fn http_stream(&mut self) -> Option<&mut dyn HttpSendStream> {
Some(self)
}
#[allow(clippy::drop_copy)]
fn send_data_atomic(&mut self, conn: &mut Connection, buf: &[u8]) -> Res<()> {
let data_frame = HFrame::Data {
len: buf.len() as u64,
};
let mut enc = Encoder::default();
data_frame.encode(&mut enc);
self.stream.buffer(&enc);
self.stream.buffer(buf);
mem::drop(self.stream.send_buffer(conn)?);
Ok(())
}
}
impl HttpSendStream for SendMessage {
@ -306,8 +321,13 @@ impl HttpSendStream for SendMessage {
}
fn set_new_listener(&mut self, conn_events: Box<dyn SendStreamEvents>) {
self.stream_type = Http3StreamType::ExtendedConnect;
self.conn_events = conn_events;
}
fn any(&self) -> &dyn Any {
self
}
}
impl ::std::fmt::Display for SendMessage {

View file

@ -17,7 +17,7 @@ use crate::{Http3Parameters, Http3StreamInfo, Res};
use neqo_common::{qtrace, Datagram};
use neqo_crypto::{AntiReplay, Cipher, PrivateKey, PublicKey, ZeroRttChecker};
use neqo_transport::server::{ActiveConnectionRef, Server, ValidateAddress};
use neqo_transport::{tparams::PreferredAddress, ConnectionIdGenerator, Output};
use neqo_transport::{ConnectionIdGenerator, Output};
use std::cell::RefCell;
use std::cell::RefMut;
use std::collections::HashMap;
@ -84,10 +84,6 @@ impl Http3Server {
self.server.set_ciphers(ciphers);
}
pub fn set_preferred_address(&mut self, spa: PreferredAddress) {
self.server.set_preferred_address(spa);
}
/// Enable encrypted client hello (ECH).
///
/// # Errors
@ -219,10 +215,10 @@ impl Http3Server {
);
}
Http3ServerConnEvent::ExtendedConnectClosed {
stream_id, error, ..
stream_id, reason, ..
} => self.events.webtransport_session_closed(
WebTransportRequest::new(conn.clone(), handler.clone(), stream_id),
error,
reason,
),
Http3ServerConnEvent::ExtendedConnectNewStream(stream_info) => self
.events
@ -470,7 +466,7 @@ mod tests {
);
}
ConnectionEvent::StateChange(State::Connected) => connected = true,
ConnectionEvent::StateChange(_) => (),
ConnectionEvent::StateChange(_) | ConnectionEvent::SendStreamCreatable { .. } => (),
_ => panic!("unexpected event"),
}
}

View file

@ -6,7 +6,7 @@
use crate::connection::Http3State;
use crate::{
features::extended_connect::{ExtendedConnectEvents, ExtendedConnectType},
features::extended_connect::{ExtendedConnectEvents, ExtendedConnectType, SessionCloseReason},
CloseType, Http3StreamInfo, HttpRecvStreamEvents, Priority, RecvStreamEvents, SendStreamEvents,
};
use neqo_common::Header;
@ -52,7 +52,7 @@ pub(crate) enum Http3ServerConnEvent {
ExtendedConnectClosed {
connect_type: ExtendedConnectType,
stream_id: StreamId,
error: Option<AppError>,
reason: SessionCloseReason,
},
ExtendedConnectNewStream(Http3StreamInfo),
}
@ -116,18 +116,24 @@ impl HttpRecvStreamEvents for Http3ServerConnEvents {
}
impl ExtendedConnectEvents for Http3ServerConnEvents {
fn session_start(&self, _connect_type: ExtendedConnectType, _stream_id: StreamId) {}
fn session_start(
&self,
_connect_type: ExtendedConnectType,
_stream_id: StreamId,
_status: u16,
) {
}
fn session_end(
&self,
connect_type: ExtendedConnectType,
stream_id: StreamId,
error: Option<AppError>,
reason: SessionCloseReason,
) {
self.insert(Http3ServerConnEvent::ExtendedConnectClosed {
connect_type,
stream_id,
error,
reason,
});
}

View file

@ -8,7 +8,9 @@
use crate::connection::Http3State;
use crate::connection_server::Http3ServerHandler;
use crate::{Http3StreamInfo, Http3StreamType, Priority, Res};
use crate::{
features::extended_connect::SessionCloseReason, Http3StreamInfo, Http3StreamType, Priority, Res,
};
use neqo_common::{qdebug, qinfo, Header};
use neqo_transport::server::ActiveConnectionRef;
use neqo_transport::{AppError, Connection, StreamId, StreamType};
@ -234,6 +236,11 @@ impl WebTransportRequest {
}
}
#[must_use]
pub fn state(&self) -> Http3State {
self.stream_handler.handler.borrow().state()
}
/// Respond to a `WebTransport` session request.
/// # Errors
/// It may return `InvalidStreamId` if a stream does not exist anymore.
@ -249,6 +256,22 @@ impl WebTransportRequest {
)
}
/// # Errors
/// It may return `InvalidStreamId` if a stream does not exist anymore.
/// Also return an error if the stream was closed on the transport layer,
/// but that information is not yet consumed on the http/3 layer.
pub fn close_session(&mut self, error: u32, message: &str) -> Res<()> {
self.stream_handler
.handler
.borrow_mut()
.webtransport_close_session(
&mut self.stream_handler.conn.borrow_mut(),
self.stream_handler.stream_info.stream_id(),
error,
message,
)
}
#[must_use]
pub fn stream_id(&self) -> StreamId {
self.stream_handler.stream_id()
@ -314,7 +337,7 @@ pub enum WebTransportServerEvent {
},
SessionClosed {
session: WebTransportRequest,
error: Option<AppError>,
reason: SessionCloseReason,
},
NewStream(Http3OrWebTransportStream),
}
@ -473,10 +496,10 @@ impl Http3ServerEvents {
pub(crate) fn webtransport_session_closed(
&self,
session: WebTransportRequest,
error: Option<AppError>,
reason: SessionCloseReason,
) {
self.insert(Http3ServerEvent::WebTransport(
WebTransportServerEvent::SessionClosed { session, error },
WebTransportServerEvent::SessionClosed { session, reason },
));
}

View file

@ -48,6 +48,7 @@ pub struct HSetting {
}
impl HSetting {
#[must_use]
pub fn new(setting_type: HSettingType, value: u64) -> Self {
Self {
setting_type,
@ -62,12 +63,14 @@ pub struct HSettings {
}
impl HSettings {
#[must_use]
pub fn new(settings: &[HSetting]) -> Self {
Self {
settings: settings.to_vec(),
}
}
#[must_use]
pub fn get(&self, setting: HSettingType) -> u64 {
match self.settings.iter().find(|s| s.setting_type == setting) {
Some(v) => v.value,
@ -100,6 +103,8 @@ impl HSettings {
});
}
/// # Errors
/// Returns an error if settings types are reserved of settings value are not permitted.
pub fn decode_frame_contents(&mut self, dec: &mut Decoder) -> Res<()> {
while dec.remaining() > 0 {
let t = dec.decode_varint();
@ -120,9 +125,13 @@ impl HSettings {
(Some(SETTINGS_QPACK_BLOCKED_STREAMS), Some(value)) => self
.settings
.push(HSetting::new(HSettingType::BlockedStreams, value)),
(Some(SETTINGS_ENABLE_WEB_TRANSPORT), Some(value)) => self
.settings
.push(HSetting::new(HSettingType::EnableWebTransport, value)),
(Some(SETTINGS_ENABLE_WEB_TRANSPORT), Some(value)) => {
if value > 1 {
return Err(Error::HttpSettings);
}
self.settings
.push(HSetting::new(HSettingType::EnableWebTransport, value));
}
// other supported settings here
(Some(_), Some(_)) => {} // ignore unknown setting, it is fine.
_ => return Err(Error::NotEnoughData),

View file

@ -7,7 +7,7 @@
#![allow(clippy::module_name_repetitions)]
use crate::control_stream_local::HTTP3_UNI_STREAM_TYPE_CONTROL;
use crate::hframe::H3_FRAME_TYPE_HEADERS;
use crate::frames::H3_FRAME_TYPE_HEADERS;
use crate::{CloseType, Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream};
use neqo_common::{qtrace, Decoder, IncrementalDecoderUint, Role};
use neqo_qpack::decoder::QPACK_UNI_STREAM_TYPE_DECODER;
@ -246,7 +246,7 @@ mod tests {
use test_fixture::{connect, now};
use crate::control_stream_local::HTTP3_UNI_STREAM_TYPE_CONTROL;
use crate::hframe::H3_FRAME_TYPE_HEADERS;
use crate::frames::H3_FRAME_TYPE_HEADERS;
use crate::{CloseType, Error, NewStreamType, ReceiveOutput, RecvStream, Res};
use neqo_common::{Encoder, Role};
use neqo_qpack::decoder::QPACK_UNI_STREAM_TYPE_DECODER;

View file

@ -6,7 +6,7 @@
#![allow(unused_assignments)]
use neqo_common::{event::Provider, qtrace, Datagram};
use neqo_common::{event::Provider, Datagram};
use neqo_crypto::{AuthenticationStatus, ResumptionToken};
use neqo_http3::{
Header, Http3Client, Http3ClientEvent, Http3OrWebTransportStream, Http3Parameters, Http3Server,

View file

@ -16,12 +16,11 @@ use test_fixture::*;
fn exchange_packets(client: &mut Http3Client, server: &mut Http3Server) {
let mut out = None;
let mut client_data;
loop {
out = client.process(out, now()).dgram();
client_data = out.is_none();
let client_done = out.is_none();
out = server.process(out, now()).dgram();
if out.is_none() && client_data {
if out.is_none() && client_done {
break;
}
}
@ -107,23 +106,20 @@ fn priority_update() {
client.priority_update(stream_id, update_priority).unwrap();
exchange_packets(&mut client, &mut server);
let priority_event = loop {
let event = server.next_event().unwrap();
if matches!(event, Http3ServerEvent::PriorityUpdate { .. }) {
break event;
}
};
match priority_event {
Http3ServerEvent::PriorityUpdate {
let found = server.events().any(|e| {
if let Http3ServerEvent::PriorityUpdate {
stream_id: update_id,
priority,
} => {
} = e
{
assert_eq!(update_id, stream_id);
assert_eq!(priority, update_priority);
true
} else {
false
}
other => panic!("unexpected server event: {:?}", other),
}
});
assert!(found);
}
#[test]

View file

@ -7,14 +7,15 @@
mod negotiation;
mod sessions;
mod streams;
use neqo_common::event::Provider;
use neqo_crypto::AuthenticationStatus;
use neqo_http3::{
Error, Http3Client, Http3ClientEvent, Http3OrWebTransportStream, Http3Parameters, Http3Server,
Http3ServerEvent, Http3State, WebTransportEvent, WebTransportRequest, WebTransportServerEvent,
features::extended_connect::SessionCloseReason, Error, Http3Client, Http3ClientEvent,
Http3OrWebTransportStream, Http3Parameters, Http3Server, Http3ServerEvent, Http3State,
WebTransportEvent, WebTransportRequest, WebTransportServerEvent,
};
use neqo_transport::{AppError, StreamId, StreamType};
use neqo_transport::{StreamId, StreamType};
use std::cell::RefCell;
use std::rc::Rc;
use test_fixture::{
@ -106,6 +107,10 @@ impl WtTest {
Self { client, server }
}
pub fn new_with(mut client: Http3Client, mut server: Http3Server) -> Self {
connect_with(&mut client, &mut server);
Self { client, server }
}
fn negotiate_wt_session(&mut self, accept: bool) -> (StreamId, Option<WebTransportRequest>) {
let wt_session_id = self
.client
@ -147,7 +152,10 @@ impl WtTest {
let wt_session_negotiated_event = |e| {
matches!(
e,
Http3ClientEvent::WebTransport(WebTransportEvent::Session(stream_id)) if stream_id == wt_session_id
Http3ClientEvent::WebTransport(WebTransportEvent::Session{
stream_id,
status
}) if stream_id == wt_session_id && status == 200
)
};
assert!(self.client.events().any(wt_session_negotiated_event));
@ -178,14 +186,14 @@ impl WtTest {
fn session_closed_client(
e: &Http3ClientEvent,
id: StreamId,
expected_error: &Option<AppError>,
expected_reason: &SessionCloseReason,
) -> bool {
if let Http3ClientEvent::WebTransport(WebTransportEvent::SessionClosed {
stream_id,
error,
reason,
}) = e
{
*stream_id == id && error == expected_error
*stream_id == id && reason == expected_reason
} else {
false
}
@ -194,12 +202,12 @@ impl WtTest {
pub fn check_session_closed_event_client(
&mut self,
wt_session_id: StreamId,
expected_error: Option<AppError>,
expected_reason: SessionCloseReason,
) {
let mut event_found = false;
while let Some(event) = self.client.next_event() {
event_found = WtTest::session_closed_client(&event, wt_session_id, &expected_error);
event_found = WtTest::session_closed_client(&event, wt_session_id, &expected_reason);
if event_found {
break;
}
@ -215,14 +223,14 @@ impl WtTest {
fn session_closed_server(
e: &Http3ServerEvent,
id: StreamId,
expected_error: &Option<AppError>,
expected_reason: &SessionCloseReason,
) -> bool {
if let Http3ServerEvent::WebTransport(WebTransportServerEvent::SessionClosed {
session,
error,
reason,
}) = e
{
session.stream_id() == id && error == expected_error
session.stream_id() == id && reason == expected_reason
} else {
false
}
@ -231,13 +239,13 @@ impl WtTest {
pub fn check_session_closed_event_server(
&mut self,
wt_session: &mut WebTransportRequest,
expected_error: Option<AppError>,
expected_reeason: SessionCloseReason,
) {
let event = self.server.next_event().unwrap();
assert!(WtTest::session_closed_server(
&event,
wt_session.stream_id(),
&expected_error
&expected_reeason
));
}
@ -345,7 +353,7 @@ impl WtTest {
expected_stop_sending_ids: &[StreamId],
expected_error_stream_stop_sending: Option<u64>,
expected_local: bool,
expected_session_close: Option<(StreamId, Option<u64>)>,
expected_session_close: Option<(StreamId, SessionCloseReason)>,
) {
let mut reset_ids_count = 0;
let mut stop_sending_ids_count = 0;
@ -369,11 +377,11 @@ impl WtTest {
}
Http3ClientEvent::WebTransport(WebTransportEvent::SessionClosed {
stream_id,
error,
reason,
}) => {
close_event = true;
assert_eq!(stream_id, expected_session_close.unwrap().0);
assert_eq!(expected_session_close.unwrap().1, error);
assert_eq!(stream_id, expected_session_close.as_ref().unwrap().0);
assert_eq!(expected_session_close.as_ref().unwrap().1, reason);
}
_ => {}
}
@ -486,7 +494,7 @@ impl WtTest {
expected_error_stream_reset: Option<u64>,
expected_stop_sending_ids: &[StreamId],
expected_error_stream_stop_sending: Option<u64>,
expected_session_close: Option<(StreamId, Option<u64>)>,
expected_session_close: Option<(StreamId, SessionCloseReason)>,
) {
let mut reset_ids_count = 0;
let mut stop_sending_ids_count = 0;
@ -505,11 +513,14 @@ impl WtTest {
}
Http3ServerEvent::WebTransport(WebTransportServerEvent::SessionClosed {
session,
error,
reason,
}) => {
close_event = true;
assert_eq!(session.stream_id(), expected_session_close.unwrap().0);
assert_eq!(expected_session_close.unwrap().1, error);
assert_eq!(
session.stream_id(),
expected_session_close.as_ref().unwrap().0
);
assert_eq!(expected_session_close.as_ref().unwrap().1, reason);
}
_ => {}
}
@ -518,4 +529,19 @@ impl WtTest {
assert_eq!(stop_sending_ids_count, expected_stop_sending_ids.len());
assert_eq!(close_event, expected_session_close.is_some());
}
pub fn session_close_frame_client(&mut self, session_id: StreamId, error: u32, message: &str) {
self.client
.webtransport_close_session(session_id, error, message)
.unwrap();
}
pub fn session_close_frame_server(
&mut self,
wt_session: &mut WebTransportRequest,
error: u32,
message: &str,
) {
wt_session.close_session(error, message).unwrap();
}
}

View file

@ -5,8 +5,13 @@
// except according to those terms.
use super::{connect, default_http3_client, default_http3_server, exchange_packets};
use neqo_common::event::Provider;
use neqo_http3::{Http3Client, Http3ClientEvent, Http3State, WebTransportEvent};
use neqo_common::{event::Provider, Encoder};
use neqo_crypto::AuthenticationStatus;
use neqo_http3::{
settings::{HSetting, HSettingType, HSettings},
Error, HFrame, Http3Client, Http3ClientEvent, Http3State, WebTransportEvent,
};
use neqo_transport::{Connection, ConnectionError, StreamType};
use std::time::Duration;
use test_fixture::*;
@ -103,3 +108,49 @@ fn zero_rtt_wt_settings() {
zero_rtt(false, true, true, false);
zero_rtt(false, true, true, true);
}
fn exchange_packets2(client: &mut Http3Client, server: &mut Connection) {
let mut out = None;
loop {
out = client.process(out, now()).dgram();
out = server.process(out, now()).dgram();
if out.is_none() {
break;
}
}
}
/// An `ENABLE_WEBTRANSPORT` setting with value 2 (the test encodes 2; the
/// client only treats it as valid otherwise — TODO confirm allowed values
/// against the settings handling) must cause the client to close the
/// connection with an `HttpSettings` application error.
#[test]
fn wrong_setting_value() {
    // HTTP/3 control stream type identifier.
    const CONTROL_STREAM_TYPE: &[u8] = &[0x0];
    let mut client = default_http3_client(false);
    let mut server = default_server_h3();
    exchange_packets2(&mut client, &mut server);
    client.authenticated(AuthenticationStatus::Ok, now());
    exchange_packets2(&mut client, &mut server);

    // Open the server's control stream by hand on the raw transport.
    let control = server.stream_create(StreamType::UniDi).unwrap();
    server.stream_send(control, CONTROL_STREAM_TYPE).unwrap();
    // Encode a settings frame with EnableWebTransport = 2 and send it.
    let mut enc = Encoder::default();
    let settings = HFrame::Settings {
        settings: HSettings::new(&[HSetting::new(HSettingType::EnableWebTransport, 2)]),
    };
    settings.encode(&mut enc);
    // The whole frame must fit in a single send.
    assert_eq!(
        server.stream_send(control, &enc[..]).unwrap(),
        enc[..].len()
    );
    exchange_packets2(&mut client, &mut server);

    // The client must be closing (or closed) with the HttpSettings error code.
    match client.state() {
        Http3State::Closing(err) | Http3State::Closed(err) => {
            assert_eq!(
                err,
                ConnectionError::Application(Error::HttpSettings.code())
            );
        }
        _ => panic!("Wrong state {:?}", client.state()),
    };
}

View file

@ -4,9 +4,16 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use crate::webtransport::WtTest;
use neqo_http3::Error;
use crate::webtransport::{default_http3_server, WtTest};
use neqo_common::{event::Provider, Encoder};
use neqo_http3::{
features::extended_connect::SessionCloseReason, frames::WebTransportFrame, Error, Header,
Http3ClientEvent, Http3OrWebTransportStream, Http3Parameters, Http3Server, Http3ServerEvent,
Http3State, Priority, WebTransportEvent, WebTransportServerEvent,
};
use neqo_transport::{ConnectionParameters, StreamType};
use std::mem;
use test_fixture::{http3_client_with_params, now};
#[test]
fn wt_session() {
@ -19,7 +26,7 @@ fn wt_session_reject() {
let mut wt = WtTest::new();
let (wt_session_id, _wt_session) = wt.negotiate_wt_session(false);
wt.check_session_closed_event_client(wt_session_id, Some(Error::HttpRequestRejected.code()));
wt.check_session_closed_event_client(wt_session_id, SessionCloseReason::Status(404));
}
#[test]
@ -28,7 +35,10 @@ fn wt_session_close_client() {
let mut wt_session = wt.create_wt_session();
wt.cancel_session_client(wt_session.stream_id());
wt.check_session_closed_event_server(&mut wt_session, Some(Error::HttpNoError.code()));
wt.check_session_closed_event_server(
&mut wt_session,
SessionCloseReason::Error(Error::HttpNoError.code()),
);
}
#[test]
@ -37,7 +47,10 @@ fn wt_session_close_server() {
let mut wt_session = wt.create_wt_session();
wt.cancel_session_server(&mut wt_session);
wt.check_session_closed_event_client(wt_session.stream_id(), Some(Error::HttpNoError.code()));
wt.check_session_closed_event_client(
wt_session.stream_id(),
SessionCloseReason::Error(Error::HttpNoError.code()),
);
}
#[test]
@ -49,7 +62,10 @@ fn wt_session_close_server_close_send() {
wt.exchange_packets();
wt.check_session_closed_event_client(
wt_session.stream_id(),
Some(Error::HttpGeneralProtocolStream.code()),
SessionCloseReason::Clean {
error: 0,
message: "".to_string(),
},
);
}
@ -62,7 +78,10 @@ fn wt_session_close_server_stop_sending() {
.stream_stop_sending(Error::HttpNoError.code())
.unwrap();
wt.exchange_packets();
wt.check_session_closed_event_client(wt_session.stream_id(), Some(Error::HttpNoError.code()));
wt.check_session_closed_event_client(
wt_session.stream_id(),
SessionCloseReason::Error(Error::HttpNoError.code()),
);
}
#[test]
@ -74,5 +93,315 @@ fn wt_session_close_server_reset() {
.stream_reset_send(Error::HttpNoError.code())
.unwrap();
wt.exchange_packets();
wt.check_session_closed_event_client(wt_session.stream_id(), Some(Error::HttpNoError.code()));
wt.check_session_closed_event_client(
wt_session.stream_id(),
SessionCloseReason::Error(Error::HttpNoError.code()),
);
}
/// A 1xx interim response on the CONNECT request must not break session
/// establishment: after the interim headers, a final accept still produces a
/// `WebTransportEvent::Session` with status 200 on the client.
/// NOTE(review): "respone" in the test name is a typo for "response".
#[test]
fn wt_session_respone_with_1xx() {
    let mut wt = WtTest::new();

    let wt_session_id = wt
        .client
        .webtransport_create_session(now(), &("https", "something.com", "/"), &[])
        .unwrap();
    wt.exchange_packets();

    // Find the NewSession event on the server and verify the extended
    // CONNECT request headers.
    let mut wt_server_session = None;
    while let Some(event) = wt.server.next_event() {
        if let Http3ServerEvent::WebTransport(WebTransportServerEvent::NewSession {
            session,
            headers,
        }) = event
        {
            assert!(
                headers
                    .iter()
                    .any(|h| h.name() == ":method" && h.value() == "CONNECT")
                    && headers
                        .iter()
                        .any(|h| h.name() == ":protocol" && h.value() == "webtransport")
            );
            wt_server_session = Some(session);
        }
    }
    let mut wt_server_session = wt_server_session.unwrap();

    // Send interim response.
    wt_server_session
        .send_headers(&[Header::new(":status", "111")])
        .unwrap();
    // Then accept the session.
    wt_server_session.response(true).unwrap();

    wt.exchange_packets();

    // The client should see the session negotiated with status 200 despite
    // the interim 111 response.
    let wt_session_negotiated_event = |e| {
        matches!(
            e,
            Http3ClientEvent::WebTransport(WebTransportEvent::Session{
                stream_id,
                status
            }) if stream_id == wt_session_id && status == 200
        )
    };
    assert!(wt.client.events().any(wt_session_negotiated_event));

    // Client and server must agree on the session's stream id.
    assert_eq!(wt_session_id, wt_server_session.stream_id());
}
/// Accepting the session but immediately closing the send side (FIN on the
/// CONNECT stream) must surface a clean `SessionClosed` event on the client
/// with error 0 and an empty message.
/// NOTE(review): "respone" in the test name is a typo for "response".
#[test]
fn wt_session_respone_200_with_fin() {
    let mut wt = WtTest::new();

    let wt_session_id = wt
        .client
        .webtransport_create_session(now(), &("https", "something.com", "/"), &[])
        .unwrap();
    wt.exchange_packets();

    // Find the NewSession event on the server and verify the extended
    // CONNECT request headers.
    let mut wt_server_session = None;
    while let Some(event) = wt.server.next_event() {
        if let Http3ServerEvent::WebTransport(WebTransportServerEvent::NewSession {
            session,
            headers,
        }) = event
        {
            assert!(
                headers
                    .iter()
                    .any(|h| h.name() == ":method" && h.value() == "CONNECT")
                    && headers
                        .iter()
                        .any(|h| h.name() == ":protocol" && h.value() == "webtransport")
            );
            wt_server_session = Some(session);
        }
    }
    let mut wt_server_session = wt_server_session.unwrap();

    // Accept the session, then close the send side right away.
    wt_server_session.response(true).unwrap();
    wt_server_session.stream_close_send().unwrap();

    wt.exchange_packets();

    // The client should report a clean close (error 0, empty message).
    let wt_session_close_event = |e| {
        matches!(
            e,
            Http3ClientEvent::WebTransport(WebTransportEvent::SessionClosed{
                stream_id,
                reason
            }) if stream_id == wt_session_id && reason == SessionCloseReason::Clean{ error: 0, message: "".to_string()}
        )
    };
    assert!(wt.client.events().any(wt_session_close_event));

    // Client and server must agree on the session's stream id.
    assert_eq!(wt_session_id, wt_server_session.stream_id());
}
/// A CloseSession frame sent by the client must be delivered to the server as
/// a clean close carrying the original error code and message.
#[test]
fn wt_session_close_frame_client() {
    const ERROR_NUM: u32 = 23;
    const ERROR_MESSAGE: &str = "Something went wrong";
    let mut wt = WtTest::new();
    let mut wt_session = wt.create_wt_session();

    // Client initiates the close with an application error and message.
    wt.session_close_frame_client(wt_session.stream_id(), ERROR_NUM, ERROR_MESSAGE);
    wt.exchange_packets();

    // The server sees a clean close with the same error and message.
    wt.check_session_closed_event_server(
        &mut wt_session,
        SessionCloseReason::Clean {
            error: ERROR_NUM,
            message: ERROR_MESSAGE.to_string(),
        },
    );
}
/// A CloseSession frame sent by the server must be delivered to the client as
/// a clean close carrying the original error code and message.
#[test]
fn wt_session_close_frame_server() {
    const ERROR_NUM: u32 = 23;
    const ERROR_MESSAGE: &str = "Something went wrong";
    let mut wt = WtTest::new();
    let mut wt_session = wt.create_wt_session();

    // Server initiates the close with an application error and message.
    wt.session_close_frame_server(&mut wt_session, ERROR_NUM, ERROR_MESSAGE);
    wt.exchange_packets();

    // The client sees a clean close with the same error and message.
    wt.check_session_closed_event_client(
        wt_session.stream_id(),
        SessionCloseReason::Clean {
            error: ERROR_NUM,
            message: ERROR_MESSAGE.to_string(),
        },
    );
}
/// An unknown frame type on the session (CONNECT) stream must be ignored:
/// the session keeps working afterwards, and a subsequent CloseSession is
/// processed normally.
#[test]
fn wt_unknown_session_frame_client() {
    const UNKNOWN_FRAME_LEN: usize = 832;
    const BUF: &[u8] = &[0; 10];
    const ERROR_NUM: u32 = 23;
    const ERROR_MESSAGE: &str = "Something went wrong";
    let mut wt = WtTest::new();
    let mut wt_session = wt.create_wt_session();

    // Send an unknown frame: varint type, varint length, then zero-filled
    // payload of UNKNOWN_FRAME_LEN bytes.
    let mut enc = Encoder::with_capacity(UNKNOWN_FRAME_LEN + 4);
    enc.encode_varint(1028_u64); // Arbitrary type.
    enc.encode_varint(UNKNOWN_FRAME_LEN as u64);
    let mut buf: Vec<_> = enc.into();
    buf.resize(UNKNOWN_FRAME_LEN + buf.len(), 0);
    wt.client.send_data(wt_session.stream_id(), &buf).unwrap();
    wt.exchange_packets();

    // The session is still active: a new server stream delivers data.
    let mut unidi_server = wt.create_wt_stream_server(&mut wt_session, StreamType::UniDi);
    wt.send_data_server(&mut unidi_server, BUF);
    wt.receive_data_client(unidi_server.stream_id(), true, BUF, false);

    // Now close the session.
    wt.session_close_frame_client(wt_session.stream_id(), ERROR_NUM, ERROR_MESSAGE);
    wt.exchange_packets();

    // Client side: the open server stream is reset with HttpRequestCancelled.
    wt.check_events_after_closing_session_client(
        &[unidi_server.stream_id()],
        Some(Error::HttpRequestCancelled.code()),
        &[],
        None,
        false,
        None,
    );
    // Server side: stop-sending on its stream, plus a clean session close
    // carrying the client's error and message.
    wt.check_events_after_closing_session_server(
        &[],
        None,
        &[unidi_server.stream_id()],
        Some(Error::HttpRequestCancelled.code()),
        Some((
            wt_session.stream_id(),
            SessionCloseReason::Clean {
                error: ERROR_NUM,
                message: ERROR_MESSAGE.to_string(),
            },
        )),
    );
}
/// A malformed CloseSession frame (corrupted message string) must close only
/// the WebTransport session, on both peers, with HttpGeneralProtocolStream —
/// the HTTP/3 connection itself stays up.
#[test]
fn wt_close_session_frame_broken_client() {
    let mut wt = WtTest::new();
    let mut wt_session = wt.create_wt_session();

    // Send an incorrect CloseSession frame.
    let mut enc = Encoder::default();
    WebTransportFrame::CloseSession {
        error: 5,
        message: "Hello".to_string(),
    }
    .encode(&mut enc);
    let mut buf: Vec<_> = enc.into();
    // Corrupt the string: 0xff is never valid UTF-8, so decoding must fail.
    buf[9] = 0xff;
    wt.client.send_data(wt_session.stream_id(), &buf).unwrap();
    wt.exchange_packets();

    // Check that the WebTransport session is closed with a protocol error on
    // both sides.
    wt.check_session_closed_event_client(
        wt_session.stream_id(),
        SessionCloseReason::Error(Error::HttpGeneralProtocolStream.code()),
    );
    wt.check_session_closed_event_server(
        &mut wt_session,
        SessionCloseReason::Error(Error::HttpGeneralProtocolStream.code()),
    );

    // The Http3 connection is still working.
    assert_eq!(wt.client.state(), Http3State::Connected);
    assert_eq!(wt_session.state(), Http3State::Connected);
}
/// Drain the server's pending events and return the stream from the first
/// `Headers` event, or `None` if no such event is queued.
fn receive_request(server: &mut Http3Server) -> Option<Http3OrWebTransportStream> {
    // `from_fn` yields events until `next_event()` returns `None`;
    // `find_map` stops consuming at the first `Headers` event, just like the
    // explicit loop it replaces.
    std::iter::from_fn(|| server.next_event()).find_map(|event| {
        if let Http3ServerEvent::Headers { stream, .. } = event {
            Some(stream)
        } else {
            None
        }
    })
}
/// When connection flow control blocks the CloseSession frame from being sent
/// in one piece, the server must already refuse to create new streams, while
/// the client — which has not yet received the complete frame — may still
/// create streams; once the frame arrives, those streams get cancelled and
/// the clean close is reported.
#[test]
fn wt_close_session_cannot_be_sent_at_once() {
    // Connection-level flow control limit, nearly consumed by BUF below.
    const LIMIT: u64 = 500;
    const BUF: &[u8] = &[0; 443];
    const ERROR_NUM: u32 = 23;
    const ERROR_MESSAGE: &str = "Something went wrong";

    let client = http3_client_with_params(
        Http3Parameters::default()
            .webtransport(true)
            .connection_parameters(ConnectionParameters::default().max_data(LIMIT)),
    );
    let server = default_http3_server(true);
    let mut wt = WtTest::new_with(client, server);
    let mut wt_session = wt.create_wt_session();

    // Fill the flow control window using an unrelated http stream.
    let req_id = wt
        .client
        .fetch(
            now(),
            "GET",
            &("https", "something.com", "/"),
            &[],
            Priority::default(),
        )
        .unwrap();
    assert_eq!(req_id, 4);
    wt.exchange_packets();
    let mut req = receive_request(&mut wt.server).unwrap();
    req.send_headers(&[
        Header::new(":status", "200"),
        Header::new("content-length", BUF.len().to_string()),
    ])
    .unwrap();
    req.send_data(BUF).unwrap();

    // Now close the session.
    wt.session_close_frame_server(&mut wt_session, ERROR_NUM, ERROR_MESSAGE);
    // server cannot create new streams.
    assert_eq!(
        wt_session.create_stream(StreamType::UniDi),
        Err(Error::InvalidStreamId)
    );
    let out = wt.server.process(None, now()).dgram();
    let out = wt.client.process(out, now()).dgram();
    // Client has not received the full CloseSession frame and it can create more streams.
    let unidi_client = wt.create_wt_stream_client(wt_session.stream_id(), StreamType::UniDi);
    // Exchange a few more packets so the rest of the CloseSession frame can
    // be delivered (each step is one client/server process round).
    let out = wt.server.process(out, now()).dgram();
    let out = wt.client.process(out, now()).dgram();
    let out = wt.server.process(out, now()).dgram();
    let out = wt.client.process(out, now()).dgram();
    let out = wt.server.process(out, now()).dgram();
    let _out = wt.client.process(out, now()).dgram();

    // Client: its late stream is cancelled; the session close is clean with
    // the server's error and message.
    wt.check_events_after_closing_session_client(
        &[],
        None,
        &[unidi_client],
        Some(Error::HttpRequestCancelled.code()),
        false,
        Some((
            wt_session.stream_id(),
            SessionCloseReason::Clean {
                error: ERROR_NUM,
                message: ERROR_MESSAGE.to_string(),
            },
        )),
    );
    // Server: no further events expected.
    wt.check_events_after_closing_session_server(&[], None, &[], None, None);
}

View file

@ -5,7 +5,7 @@
// except according to those terms.
use crate::webtransport::WtTest;
use neqo_http3::Error;
use neqo_http3::{features::extended_connect::SessionCloseReason, Error};
use neqo_transport::StreamType;
use std::mem;
@ -274,7 +274,10 @@ fn wt_client_session_close_1() {
Some(Error::HttpRequestCancelled.code()),
&[bidi_from_client],
Some(Error::HttpRequestCancelled.code()),
Some((wt_session.stream_id(), Some(Error::HttpNoError.code()))),
Some((
wt_session.stream_id(),
SessionCloseReason::Error(Error::HttpNoError.code()),
)),
);
wt.check_events_after_closing_session_client(
@ -306,7 +309,10 @@ fn wt_client_session_close_2() {
Some(Error::HttpRequestCancelled.code()),
&[],
None,
Some((wt_session.stream_id(), Some(Error::HttpNoError.code()))),
Some((
wt_session.stream_id(),
SessionCloseReason::Error(Error::HttpNoError.code()),
)),
);
wt.check_events_after_closing_session_client(
@ -339,7 +345,10 @@ fn wt_client_session_close_3() {
None,
&[],
None,
Some((wt_session.stream_id(), Some(Error::HttpNoError.code()))),
Some((
wt_session.stream_id(),
SessionCloseReason::Error(Error::HttpNoError.code()),
)),
);
wt.check_events_after_closing_session_client(&[], None, &[], None, false, None);
@ -365,7 +374,10 @@ fn wt_client_session_close_4() {
None,
&[],
None,
Some((wt_session.stream_id(), Some(Error::HttpNoError.code()))),
Some((
wt_session.stream_id(),
SessionCloseReason::Error(Error::HttpNoError.code()),
)),
);
wt.check_events_after_closing_session_client(
@ -398,7 +410,10 @@ fn wt_client_session_close_5() {
Some(Error::HttpNoError.code()),
&[],
None,
Some((wt_session.stream_id(), Some(Error::HttpNoError.code()))),
Some((
wt_session.stream_id(),
SessionCloseReason::Error(Error::HttpNoError.code()),
)),
);
wt.check_events_after_closing_session_client(&[], None, &[], None, false, None);
@ -422,7 +437,10 @@ fn wt_client_session_close_6() {
Some(Error::HttpRequestCancelled.code()),
&[bidi_from_server.stream_id()],
Some(Error::HttpRequestCancelled.code()),
Some((wt_session.stream_id(), Some(Error::HttpNoError.code()))),
Some((
wt_session.stream_id(),
SessionCloseReason::Error(Error::HttpNoError.code()),
)),
);
wt.check_events_after_closing_session_client(
@ -453,7 +471,10 @@ fn wt_client_session_close_7() {
None,
&[unidi_from_server.stream_id()],
Some(Error::HttpRequestCancelled.code()),
Some((wt_session.stream_id(), Some(Error::HttpNoError.code()))),
Some((
wt_session.stream_id(),
SessionCloseReason::Error(Error::HttpNoError.code()),
)),
);
wt.check_events_after_closing_session_client(
@ -485,7 +506,10 @@ fn wt_client_session_close_8() {
None,
&[],
None,
Some((wt_session.stream_id(), Some(Error::HttpNoError.code()))),
Some((
wt_session.stream_id(),
SessionCloseReason::Error(Error::HttpNoError.code()),
)),
);
wt.check_events_after_closing_session_client(&[], None, &[], None, false, None);
@ -509,7 +533,10 @@ fn wt_client_session_close_9() {
None,
&[unidi_server.stream_id()],
Some(Error::HttpNoError.code()),
Some((wt_session.stream_id(), Some(Error::HttpNoError.code()))),
Some((
wt_session.stream_id(),
SessionCloseReason::Error(Error::HttpNoError.code()),
)),
);
wt.check_events_after_closing_session_client(&[], None, &[], None, false, None);
@ -533,7 +560,10 @@ fn wt_client_session_close_10() {
None,
&[],
None,
Some((wt_session.stream_id(), Some(Error::HttpNoError.code()))),
Some((
wt_session.stream_id(),
SessionCloseReason::Error(Error::HttpNoError.code()),
)),
);
wt.check_events_after_closing_session_client(
@ -567,7 +597,10 @@ fn wt_client_session_close_11() {
None,
&[],
None,
Some((wt_session.stream_id(), Some(Error::HttpNoError.code()))),
Some((
wt_session.stream_id(),
SessionCloseReason::Error(Error::HttpNoError.code()),
)),
);
wt.check_events_after_closing_session_client(&[], None, &[], None, false, None);
@ -592,7 +625,10 @@ fn wt_client_session_close_12() {
None,
&[],
None,
Some((wt_session.stream_id(), Some(Error::HttpNoError.code()))),
Some((
wt_session.stream_id(),
SessionCloseReason::Error(Error::HttpNoError.code()),
)),
);
wt.check_events_after_closing_session_client(
@ -626,7 +662,10 @@ fn wt_client_session_close_13() {
Some(Error::HttpRequestCancelled.code()),
&[bidi_client_1, bidi_client_2],
Some(Error::HttpRequestCancelled.code()),
Some((wt_session.stream_id(), Some(Error::HttpNoError.code()))),
Some((
wt_session.stream_id(),
SessionCloseReason::Error(Error::HttpNoError.code()),
)),
);
wt.check_events_after_closing_session_client(
@ -671,7 +710,10 @@ fn wt_client_session_server_close_1() {
&[bidi_client],
Some(Error::HttpRequestCancelled.code()),
false,
Some((wt_session.stream_id(), Some(Error::HttpNoError.code()))),
Some((
wt_session.stream_id(),
SessionCloseReason::Error(Error::HttpNoError.code()),
)),
);
wt.check_events_after_closing_session_server(
@ -702,7 +744,10 @@ fn wt_client_session_server_close_2() {
&[unidi_client],
Some(Error::HttpRequestCancelled.code()),
false,
Some((wt_session.stream_id(), Some(Error::HttpNoError.code()))),
Some((
wt_session.stream_id(),
SessionCloseReason::Error(Error::HttpNoError.code()),
)),
);
wt.check_events_after_closing_session_server(
@ -735,7 +780,10 @@ fn wt_client_session_server_close_3() {
&[],
None,
false,
Some((wt_session.stream_id(), Some(Error::HttpNoError.code()))),
Some((
wt_session.stream_id(),
SessionCloseReason::Error(Error::HttpNoError.code()),
)),
);
wt.check_events_after_closing_session_server(&[], None, &[], None, None);
@ -761,7 +809,10 @@ fn wt_client_session_server_close_4() {
&[unidi_client],
Some(Error::HttpNoError.code()),
false,
Some((wt_session.stream_id(), Some(Error::HttpNoError.code()))),
Some((
wt_session.stream_id(),
SessionCloseReason::Error(Error::HttpNoError.code()),
)),
);
wt.check_events_after_closing_session_server(&[], None, &[], None, None);
@ -786,7 +837,10 @@ fn wt_client_session_server_close_5() {
&[bidi_server.stream_id()],
Some(Error::HttpRequestCancelled.code()),
false,
Some((wt_session.stream_id(), Some(Error::HttpNoError.code()))),
Some((
wt_session.stream_id(),
SessionCloseReason::Error(Error::HttpNoError.code()),
)),
);
wt.check_events_after_closing_session_server(
@ -817,7 +871,10 @@ fn wt_client_session_server_close_6() {
&[],
None,
false,
Some((wt_session.stream_id(), Some(Error::HttpNoError.code()))),
Some((
wt_session.stream_id(),
SessionCloseReason::Error(Error::HttpNoError.code()),
)),
);
wt.check_events_after_closing_session_server(
&[],
@ -849,7 +906,10 @@ fn wt_client_session_server_close_7() {
&[],
None,
false,
Some((wt_session.stream_id(), Some(Error::HttpNoError.code()))),
Some((
wt_session.stream_id(),
SessionCloseReason::Error(Error::HttpNoError.code()),
)),
);
wt.check_events_after_closing_session_server(&[], None, &[], None, None);
@ -875,7 +935,10 @@ fn wt_client_session_server_close_8() {
&[],
None,
false,
Some((wt_session.stream_id(), Some(Error::HttpNoError.code()))),
Some((
wt_session.stream_id(),
SessionCloseReason::Error(Error::HttpNoError.code()),
)),
);
wt.check_events_after_closing_session_server(&[], None, &[], None, None);
@ -904,7 +967,10 @@ fn wt_client_session_server_close_9() {
&[],
None,
false,
Some((wt_session.stream_id(), Some(Error::HttpNoError.code()))),
Some((
wt_session.stream_id(),
SessionCloseReason::Error(Error::HttpNoError.code()),
)),
);
wt.check_events_after_closing_session_server(&[], None, &[], None, None);
@ -930,7 +996,10 @@ fn wt_client_session_server_close_10() {
&[bidi_server.stream_id()],
Some(Error::HttpNoError.code()),
false,
Some((wt_session.stream_id(), Some(Error::HttpNoError.code()))),
Some((
wt_session.stream_id(),
SessionCloseReason::Error(Error::HttpNoError.code()),
)),
);
wt.check_events_after_closing_session_server(&[], None, &[], None, None);
@ -958,7 +1027,10 @@ fn wt_client_session_server_close_11() {
&[bidi_client_1, bidi_client_2],
Some(Error::HttpRequestCancelled.code()),
false,
Some((wt_session.stream_id(), Some(Error::HttpNoError.code()))),
Some((
wt_session.stream_id(),
SessionCloseReason::Error(Error::HttpNoError.code()),
)),
);
wt.check_events_after_closing_session_server(
@ -969,3 +1041,41 @@ fn wt_client_session_server_close_11() {
None,
);
}
/// Closing the session with a CloseSession frame while a server stream is
/// still open: the client cancels that stream locally, and the server learns
/// of both the cancellation and the clean session close.
#[test]
fn wt_session_close_frame_and_streams_client() {
    const BUF: &[u8] = &[0; 10];
    const ERROR_NUM: u32 = 23;
    const ERROR_MESSAGE: &str = "Something went wrong";
    let mut wt = WtTest::new();
    let mut wt_session = wt.create_wt_session();

    // Open a server-initiated stream and deliver some data first.
    let mut unidi_server = wt.create_wt_stream_server(&mut wt_session, StreamType::UniDi);
    wt.send_data_server(&mut unidi_server, BUF);
    wt.exchange_packets();

    // Client sends the CloseSession frame.
    wt.session_close_frame_client(wt_session.stream_id(), ERROR_NUM, ERROR_MESSAGE);

    // Client side: the open server stream is reset with HttpRequestCancelled.
    wt.check_events_after_closing_session_client(
        &[unidi_server.stream_id()],
        Some(Error::HttpRequestCancelled.code()),
        &[],
        None,
        false,
        None,
    );
    wt.exchange_packets();

    // Server side: stop-sending for its stream, plus the clean session close
    // carrying the client's error and message.
    wt.check_events_after_closing_session_server(
        &[],
        None,
        &[unidi_server.stream_id()],
        Some(Error::HttpRequestCancelled.code()),
        Some((
            wt_session.stream_id(),
            SessionCloseReason::Clean {
                error: ERROR_NUM,
                message: ERROR_MESSAGE.to_string(),
            },
        )),
    );
}

View file

@ -1 +1 @@
{"files":{"Cargo.toml":"ee62c8454626c0be39d18492af0e89e8d5badec991e2ca1db457a6d845dbd45e","src/decoder.rs":"6404dc3f93799aa0d3589817de9e2e2a5162d1235ff9dc66011df326e7f1f0d9","src/decoder_instructions.rs":"19d47158bc09551b449be205f5cd5ea83e6984c4e4d3e7d4b95938b09617015e","src/encoder.rs":"a902a3161fac8a0daea25ca15afb2333df9da01a8b5e5db92a82fe28cec417b8","src/encoder_instructions.rs":"a7f1d3a4f8ae941286d0aba81037a8df3ef85e275392ef31d9938e9314c706db","src/header_block.rs":"7910ddc28b44d2065070cb2d87ab3cfbb905cce912b23d8b12b0f0add5691ceb","src/huffman.rs":"3a9edaf827343ec6e43cfd50fcc0d0077287947160ae630da5c3ddaaefedd010","src/huffman_decode_helper.rs":"2970c57f052878b727c2f764490c54184f5c2608e1d6aa961c3b01509e290122","src/huffman_table.rs":"06fea766a6276ac56c7ee0326faed800a742c15fda1f33bf2513e6cc6a5e6d27","src/lib.rs":"29c5e47f8a4cf9c0a5dfdc614594868db22bc25b9688e5efdbc041cd222a17e5","src/prefix.rs":"72c587c40aef4ed38cf13b2de91091d671611679be2a9da6f0b24abafaf50dc5","src/qlog.rs":"7618085e27bb3fb1f4d1c73ba501b9a293723293c4020b7cc4129676eb278131","src/qpack_send_buf.rs":"ca620f64e2d9c1514bbac3e968ff692d0f8f60d99e805e05c173c74c56da39ee","src/reader.rs":"448729cb2fc7857914d093f9d1ca00b27f013666f834463ef6569c23f2ddf597","src/static_table.rs":"fda9d5c6f38f94b0bf92d3afdf8432dce6e27e189736596e16727090c77b78ec","src/stats.rs":"624dfa3b40858c304097bb0ce5b1be1bb4d7916b1abfc222f1aa705907009730","src/table.rs":"f7091bdd9ad1f8fe3b2298a7dbfd3d285c212d69569cda54f9bcf251cb758a21"},"package":null}
{"files":{"Cargo.toml":"ffbf61e358a81f58d8b4f0ca89031d53879ec8e45851a0f1bc2b583229feb815","src/decoder.rs":"8bd336c91cca989883106a9d0bf26b117d224e0e7643960c3e97d0168d1853c4","src/decoder_instructions.rs":"19d47158bc09551b449be205f5cd5ea83e6984c4e4d3e7d4b95938b09617015e","src/encoder.rs":"e72cbcdbe24cbe13ad5cbcbb0df8981a2ea67331f296ec7784480bc28ae01eef","src/encoder_instructions.rs":"a7f1d3a4f8ae941286d0aba81037a8df3ef85e275392ef31d9938e9314c706db","src/header_block.rs":"7910ddc28b44d2065070cb2d87ab3cfbb905cce912b23d8b12b0f0add5691ceb","src/huffman.rs":"3a9edaf827343ec6e43cfd50fcc0d0077287947160ae630da5c3ddaaefedd010","src/huffman_decode_helper.rs":"2970c57f052878b727c2f764490c54184f5c2608e1d6aa961c3b01509e290122","src/huffman_table.rs":"06fea766a6276ac56c7ee0326faed800a742c15fda1f33bf2513e6cc6a5e6d27","src/lib.rs":"29c5e47f8a4cf9c0a5dfdc614594868db22bc25b9688e5efdbc041cd222a17e5","src/prefix.rs":"72c587c40aef4ed38cf13b2de91091d671611679be2a9da6f0b24abafaf50dc5","src/qlog.rs":"7618085e27bb3fb1f4d1c73ba501b9a293723293c4020b7cc4129676eb278131","src/qpack_send_buf.rs":"49ded6607ec0859cb3edc5a38ff48f4d2d292f0721673d4e20700d07ac324557","src/reader.rs":"be265cc8c317512f266fafdcc835d0e413caf5280a7cc945bfe6e7e849529d67","src/static_table.rs":"fda9d5c6f38f94b0bf92d3afdf8432dce6e27e189736596e16727090c77b78ec","src/stats.rs":"624dfa3b40858c304097bb0ce5b1be1bb4d7916b1abfc222f1aa705907009730","src/table.rs":"f7091bdd9ad1f8fe3b2298a7dbfd3d285c212d69569cda54f9bcf251cb758a21"},"package":null}

View file

@ -1,6 +1,6 @@
[package]
name = "neqo-qpack"
version = "0.5.6"
version = "0.5.7"
authors = ["Dragana Damjanovic <dragana.damjano@gmail.com>"]
edition = "2018"
license = "MIT/Apache-2.0"

View file

@ -236,9 +236,10 @@ impl QPackDecoder {
/// # Panics
/// When a stream has already been added.
pub fn add_send_stream(&mut self, stream_id: StreamId) {
if self.local_stream_id.is_some() {
panic!("Adding multiple local streams");
}
assert!(
self.local_stream_id.is_none(),
"Adding multiple local streams"
);
self.local_stream_id = Some(stream_id);
}

View file

@ -124,7 +124,7 @@ impl QPackEncoder {
loop {
let mut recv = ReceiverConnWrapper::new(conn, stream_id);
match self.instruction_reader.read_instructions(&mut recv) {
Ok(instruction) => self.call_instruction(instruction, &mut conn.qlog_mut())?,
Ok(instruction) => self.call_instruction(instruction, conn.qlog_mut())?,
Err(Error::NeedMoreData) => break Ok(()),
Err(e) => break Err(e),
}

View file

@ -88,10 +88,10 @@ impl QpackData {
}
pub fn read(&mut self, r: usize) {
if r > self.buf.len() {
panic!("want to set more byte read than remaing in the buffer.");
}
assert!(
r <= self.buf.len(),
"want to set more bytes read than remain in the buffer."
);
self.buf = self.buf.split_off(r);
}
}

View file

@ -328,18 +328,11 @@ pub(crate) mod test_receiver {
use super::{Error, ReadByte, Reader, Res};
use std::collections::VecDeque;
#[derive(Default)]
pub struct TestReceiver {
buf: VecDeque<u8>,
}
impl Default for TestReceiver {
fn default() -> Self {
Self {
buf: VecDeque::new(),
}
}
}
impl ReadByte for TestReceiver {
fn read_byte(&mut self) -> Res<u8> {
self.buf.pop_back().ok_or(Error::NeedMoreData)

File diff suppressed because one or more lines are too long

View file

@ -1,6 +1,6 @@
[package]
name = "neqo-transport"
version = "0.5.6"
version = "0.5.7"
authors = ["EKR <ekr@rtfm.com>", "Andy Grover <agrover@mozilla.com>"]
edition = "2018"
license = "MIT/Apache-2.0"

View file

@ -165,11 +165,7 @@ impl AddressValidation {
now: Instant,
) -> Option<ConnectionId> {
let peer_addr = Self::encode_aad(peer_address, retry);
let data = if let Ok(d) = self.self_encrypt.open(&peer_addr, token) {
d
} else {
return None;
};
let data = self.self_encrypt.open(&peer_addr, token).ok()?;
let mut dec = Decoder::new(&data);
match dec.decode_uint(4) {
Some(d) => {

View file

@ -52,7 +52,6 @@ pub struct Cubic {
k: f64,
w_max: f64,
ca_epoch_start: Option<Instant>,
last_phase_was_tcp: bool,
tcp_acked_bytes: f64,
}
@ -64,7 +63,6 @@ impl Default for Cubic {
k: 0.0,
w_max: 0.0,
ca_epoch_start: None,
last_phase_was_tcp: false,
tcp_acked_bytes: 0.0,
}
}

View file

@ -12,15 +12,9 @@ use std::fmt::{self, Display};
use crate::cc::classic_cc::WindowAdjustment;
use std::time::{Duration, Instant};
#[derive(Debug)]
#[derive(Debug, Default)]
pub struct NewReno {}
impl Default for NewReno {
fn default() -> Self {
Self {}
}
}
impl Display for NewReno {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "NewReno")?;

View file

@ -333,7 +333,6 @@ pub type RemoteConnectionIdEntry = ConnectionIdEntry<[u8; 16]>;
#[derive(Debug, Default)]
pub struct ConnectionIdStore<SRT: Clone + PartialEq> {
cids: SmallVec<[ConnectionIdEntry<SRT>; 8]>,
retired: Vec<[ConnectionIdEntry<SRT>; 8]>,
}
impl<SRT: Clone + PartialEq> ConnectionIdStore<SRT> {
@ -446,7 +445,7 @@ impl ConnectionIdManager {
// won't be sent until until after the handshake completes, because this initial
// value remains until the connection completes and transport parameters are handled.
limit: 2,
next_seqno: 2, // A different value.
next_seqno: 1,
lost_new_connection_id: Vec::new(),
}
}
@ -464,11 +463,10 @@ impl ConnectionIdManager {
}
if let Some(cid) = self.generator.borrow_mut().generate_cid() {
assert_ne!(cid.len(), 0);
self.connection_ids.add_local(ConnectionIdEntry::new(
CONNECTION_ID_SEQNO_PREFERRED,
cid.clone(),
(),
));
debug_assert_eq!(self.next_seqno, CONNECTION_ID_SEQNO_PREFERRED);
self.connection_ids
.add_local(ConnectionIdEntry::new(self.next_seqno, cid.clone(), ()));
self.next_seqno += 1;
let srt = <[u8; 16]>::try_from(&random(16)[..]).unwrap();
Ok((cid, srt))

View file

@ -393,7 +393,7 @@ impl Connection {
streams: Streams::new(tphandler, role, events.clone()),
connection_ids: ConnectionIdStore::default(),
state_signaling: StateSignaling::Idle,
loss_recovery: LossRecovery::new(stats.clone()),
loss_recovery: LossRecovery::new(stats.clone(), conn_params.get_fast_pto()),
events,
new_token: NewTokenState::new(role),
stats,
@ -691,8 +691,8 @@ impl Connection {
let tps = &self.tps;
if let Agent::Server(ref mut s) = self.crypto.tls {
let mut enc = Encoder::default();
enc.encode_vvec_with(|mut enc_inner| {
tps.borrow().local.encode(&mut enc_inner);
enc.encode_vvec_with(|enc_inner| {
tps.borrow().local.encode(enc_inner);
});
enc.encode(extra);
let records = s.send_ticket(now, &enc)?;

View file

@ -5,6 +5,7 @@
// except according to those terms.
use crate::connection::{ConnectionIdManager, Role, LOCAL_ACTIVE_CID_LIMIT};
pub use crate::recovery::FAST_PTO_SCALE;
use crate::recv_stream::RECV_BUFFER_SIZE;
use crate::rtt::GRANULARITY;
use crate::stream_id::StreamType;
@ -70,6 +71,7 @@ pub struct ConnectionParameters {
datagram_size: u64,
outgoing_datagram_queue: usize,
incoming_datagram_queue: usize,
fast_pto: u8,
}
impl Default for ConnectionParameters {
@ -89,6 +91,7 @@ impl Default for ConnectionParameters {
datagram_size: 0,
outgoing_datagram_queue: MAX_QUEUED_DATAGRAMS_DEFAULT,
incoming_datagram_queue: MAX_QUEUED_DATAGRAMS_DEFAULT,
fast_pto: FAST_PTO_SCALE,
}
}
}
@ -246,6 +249,30 @@ impl ConnectionParameters {
self
}
pub fn get_fast_pto(&self) -> u8 {
self.fast_pto
}
/// Scale the PTO timer. A value of `FAST_PTO_SCALE` follows the spec, a smaller
/// value does not, but produces more probes with the intent of ensuring lower
/// latency in the event of tail loss. A value of `FAST_PTO_SCALE/4` is quite
/// aggressive. Smaller values (other than zero) are not rejected, but could be
/// very wasteful. Values greater than `FAST_PTO_SCALE` delay probes and could
/// reduce performance. It should not be possible to increase the PTO timer by
/// too much based on the range of valid values, but a maximum value of 255 will
/// result in very poor performance.
/// Scaling PTO this way does not affect when persistent congestion is declared,
/// but may change how many retransmissions are sent before declaring persistent
/// congestion.
///
/// # Panics
/// A value of 0 is invalid and will cause a panic.
pub fn fast_pto(mut self, scale: u8) -> Self {
assert_ne!(scale, 0);
self.fast_pto = scale;
self
}
pub fn create_transport_parameter(
&self,
role: Role,

View file

@ -901,7 +901,7 @@ fn damaged_ech_config(config: &[u8]) -> Vec<u8> {
let mut cfg = Vec::from(config);
// Ensure that the version and config_id is correct.
assert_eq!(cfg[2], 0xfe);
assert_eq!(cfg[3], 0x0a);
assert_eq!(cfg[3], 0x0d);
assert_eq!(cfg[6], ECH_CONFIG_ID);
// Change the config_id so that the server doesn't recognize it.
cfg[6] ^= 0x94;

View file

@ -6,12 +6,15 @@
use super::super::{Connection, ConnectionParameters, Output, State};
use super::{
assert_full_cwnd, connect, connect_force_idle, connect_rtt_idle, connect_with_rtt,
default_client, default_server, fill_cwnd, maybe_authenticate, send_and_receive,
send_something, AT_LEAST_PTO, DEFAULT_RTT, POST_HANDSHAKE_CWND,
assert_full_cwnd, connect, connect_force_idle, connect_rtt_idle, connect_with_rtt, cwnd,
default_client, default_server, fill_cwnd, maybe_authenticate, new_client, send_and_receive,
send_something, AT_LEAST_PTO, DEFAULT_RTT, DEFAULT_STREAM_DATA, POST_HANDSHAKE_CWND,
};
use crate::cc::CWND_MIN;
use crate::path::PATH_MTU_V6;
use crate::recovery::{MAX_OUTSTANDING_UNACK, MIN_OUTSTANDING_UNACK, PTO_PACKET_COUNT};
use crate::recovery::{
FAST_PTO_SCALE, MAX_OUTSTANDING_UNACK, MIN_OUTSTANDING_UNACK, PTO_PACKET_COUNT,
};
use crate::rtt::GRANULARITY;
use crate::stats::MAX_PTO_COUNTS;
use crate::tparams::TransportParameter;
@ -717,3 +720,91 @@ fn ping_with_ack_min() {
trickle(&mut sender, &mut receiver, 1, now);
assert_eq!(receiver.stats().frame_tx.ping, 0);
}
/// This calculates the PTO timer immediately after connection establishment.
/// It depends on there only being 2 RTT samples in the handshake.
fn expected_pto(rtt: Duration) -> Duration {
    // PTO = rtt + 4 * rttvar + ack delay.  After the handshake's two RTT
    // samples the 4 * rttvar component works out to rtt * 9 / 8.  The 25ms
    // ack delay is used because the ACK delay extension is negotiated but no
    // ACK_DELAY frame has been received yet.
    let ack_delay = Duration::from_millis(25);
    let rtt_var_component = rtt * 9 / 8;
    rtt + rtt_var_component + ack_delay
}
/// With `fast_pto(FAST_PTO_SCALE / 2)` the PTO timer should fire in half the
/// normally expected time, and the PTO probe should retransmit the lost
/// stream data.
#[test]
fn fast_pto() {
    let mut client = new_client(ConnectionParameters::default().fast_pto(FAST_PTO_SCALE / 2));
    let mut server = default_server();
    let mut now = connect_rtt_idle(&mut client, &mut server, DEFAULT_RTT);

    let res = client.process(None, now);
    let idle_timeout = ConnectionParameters::default().get_idle_timeout() - (DEFAULT_RTT / 2);
    assert_eq!(res, Output::Callback(idle_timeout));

    // Send data on a stream.
    let stream = client.stream_create(StreamType::UniDi).unwrap();
    assert_eq!(
        client.stream_send(stream, DEFAULT_STREAM_DATA).unwrap(),
        DEFAULT_STREAM_DATA.len()
    );

    // Send a packet after some time.
    now += idle_timeout / 2;
    let dgram = client.process_output(now).dgram();
    assert!(dgram.is_some());

    // Nothing to do, should return a callback.  With the scaled PTO, the
    // callback is half the normally expected PTO.
    let cb = client.process_output(now).callback();
    assert_eq!(expected_pto(DEFAULT_RTT) / 2, cb);

    // Once the PTO timer expires, a PTO packet should be sent.
    now += cb;
    let dgram = client.process(None, now).dgram();

    // The server should receive exactly one retransmitted STREAM frame.
    let stream_before = server.stats().frame_rx.stream;
    server.process_input(dgram.unwrap(), now);
    assert_eq!(server.stats().frame_rx.stream, stream_before + 1);
}
/// Even if the PTO timer is slowed right down, persistent congestion is declared
/// based on the "true" value of the timer.
#[test]
fn fast_pto_persistent_congestion() {
    // `fast_pto(FAST_PTO_SCALE * 2)` doubles the PTO period.
    let mut client = new_client(ConnectionParameters::default().fast_pto(FAST_PTO_SCALE * 2));
    let mut server = default_server();
    let mut now = connect_rtt_idle(&mut client, &mut server, DEFAULT_RTT);
    let res = client.process(None, now);
    let idle_timeout = ConnectionParameters::default().get_idle_timeout() - (DEFAULT_RTT / 2);
    assert_eq!(res, Output::Callback(idle_timeout));
    // Send packets spaced by the PTO timer. And lose them.
    // Note: This timing is a tiny bit higher than the client will use
    // to determine persistent congestion. The ACK below adds another RTT
    // estimate, which will reduce rttvar by 3/4, so persistent congestion
    // will occur at `rtt + rtt*27/32 + 25ms`.
    // That is OK as we're still showing that this interval is less than
    // six times the PTO, which is what would be used if the scaling
    // applied to the PTO used to determine persistent congestion.
    let pc_interval = expected_pto(DEFAULT_RTT) * 3;
    println!("pc_interval {:?}", pc_interval);
    let _drop1 = send_something(&mut client, now);
    // Check that the PTO matches expectations: twice the unscaled value.
    let cb = client.process_output(now).callback();
    assert_eq!(expected_pto(DEFAULT_RTT) * 2, cb);
    // After the persistent-congestion interval, lose three more packets,
    // then send one that the server will acknowledge.
    now += pc_interval;
    let _drop2 = send_something(&mut client, now);
    let _drop3 = send_something(&mut client, now);
    let _drop4 = send_something(&mut client, now);
    let dgram = send_something(&mut client, now);
    // Now acknowledge the tail packet and enter persistent congestion.
    now += DEFAULT_RTT / 2;
    let ack = server.process(Some(dgram), now).dgram();
    now += DEFAULT_RTT / 2;
    client.process_input(ack.unwrap(), now);
    // Persistent congestion collapses the congestion window to the minimum.
    assert_eq!(cwnd(&client), CWND_MIN);
}

View file

@ -916,3 +916,49 @@ fn session_flow_control_affects_all_streams() {
SMALL_MAX_DATA
);
}
/// Connect a client to a server that advertises the given initial stream
/// limits, then check that the client raises exactly one
/// `SendStreamCreatable` event per stream type with a non-zero limit,
/// along with a single `Connected` state-change event.
fn connect_w_different_limit(bidi_limit: u64, unidi_limit: u64) {
    let mut client = default_client();
    let params = ConnectionParameters::default()
        .max_streams(StreamType::BiDi, bidi_limit)
        .max_streams(StreamType::UniDi, unidi_limit);
    let mut server = new_server(params);
    // Run the handshake to completion.
    let out = client.process(None, now());
    let out = server.process(out.dgram(), now());
    let out = client.process(out.dgram(), now());
    mem::drop(server.process(out.dgram(), now()));
    assert!(maybe_authenticate(&mut client));
    // Tally the events the client produced during the handshake.
    let mut creatable_bidi = 0;
    let mut creatable_unidi = 0;
    let mut connected = 0;
    for event in client.events() {
        match event {
            ConnectionEvent::SendStreamCreatable {
                stream_type: StreamType::BiDi,
            } => creatable_bidi += 1,
            ConnectionEvent::SendStreamCreatable { .. } => creatable_unidi += 1,
            ConnectionEvent::StateChange(State::Connected) => connected += 1,
            _ => {}
        }
    }
    // One creatable event per stream type whose limit is non-zero.
    assert_eq!(creatable_bidi, usize::from(bidi_limit > 0));
    assert_eq!(creatable_unidi, usize::from(unidi_limit > 0));
    assert_eq!(connected, 1);
}
/// Exercise the creatable-stream events for every combination of zero
/// and non-zero initial bidirectional/unidirectional stream limits.
#[test]
fn client_stream_creatable_event() {
    for (bidi_limit, unidi_limit) in [(0, 0), (0, 1), (1, 0), (1, 1)] {
        connect_w_different_limit(bidi_limit, unidi_limit);
    }
}

View file

@ -10,6 +10,7 @@
use std::cmp::{max, min};
use std::collections::BTreeMap;
use std::convert::TryFrom;
use std::mem;
use std::ops::RangeInclusive;
use std::time::{Duration, Instant};
@ -44,6 +45,8 @@ pub const PTO_PACKET_COUNT: usize = 2;
pub(crate) const MAX_OUTSTANDING_UNACK: usize = 200;
/// Disable PING until this many packets are outstanding.
pub(crate) const MIN_OUTSTANDING_UNACK: usize = 16;
/// The scale we use for the fast PTO feature.
pub const FAST_PTO_SCALE: u8 = 100;
#[derive(Debug, Clone)]
#[allow(clippy::module_name_repetitions)]
@ -562,16 +565,20 @@ pub(crate) struct LossRecovery {
spaces: LossRecoverySpaces,
qlog: NeqoQlog,
stats: StatsCell,
/// The factor by which the PTO period is reduced.
/// This enables faster probing at a cost in additional lost packets.
fast_pto: u8,
}
impl LossRecovery {
pub fn new(stats: StatsCell) -> Self {
pub fn new(stats: StatsCell, fast_pto: u8) -> Self {
Self {
confirmed_time: None,
pto_state: None,
spaces: LossRecoverySpaces::default(),
qlog: NeqoQlog::default(),
stats,
fast_pto,
}
}
@ -818,16 +825,25 @@ impl LossRecovery {
rtt: &RttEstimate,
pto_state: Option<&PtoState>,
pn_space: PacketNumberSpace,
fast_pto: u8,
) -> Duration {
// This is a complicated (but safe) way of calculating:
// base_pto * F * 2^pto_count
// where F = fast_pto / FAST_PTO_SCALE (== 1 by default)
let pto_count = pto_state.map_or(0, |p| u32::try_from(p.count).unwrap_or(0));
rtt.pto(pn_space)
.checked_mul(1 << pto_state.map_or(0, |p| p.count))
.unwrap_or_else(|| Duration::from_secs(3600))
.checked_mul(
u32::from(fast_pto)
.checked_shl(pto_count)
.unwrap_or(u32::MAX),
)
.map_or(Duration::from_secs(3600), |p| p / u32::from(FAST_PTO_SCALE))
}
/// Get the current PTO period for the given packet number space.
/// Unlike calling `RttEstimate::pto` directly, this includes exponential backoff.
fn pto_period(&self, rtt: &RttEstimate, pn_space: PacketNumberSpace) -> Duration {
Self::pto_period_inner(rtt, self.pto_state.as_ref(), pn_space)
Self::pto_period_inner(rtt, self.pto_state.as_ref(), pn_space, self.fast_pto)
}
// Calculate PTO time for the given space.
@ -918,6 +934,7 @@ impl LossRecovery {
primary_path.borrow().rtt(),
self.pto_state.as_ref(),
space.space(),
self.fast_pto,
);
space.detect_lost_packets(now, loss_delay, pto, &mut lost_packets);
@ -978,7 +995,9 @@ impl ::std::fmt::Display for LossRecovery {
#[cfg(test)]
mod tests {
use super::{LossRecovery, LossRecoverySpace, PacketNumberSpace, SendProfile, SentPacket};
use super::{
LossRecovery, LossRecoverySpace, PacketNumberSpace, SendProfile, SentPacket, FAST_PTO_SCALE,
};
use crate::cc::CongestionControlAlgorithm;
use crate::cid::{ConnectionId, ConnectionIdEntry};
use crate::packet::PacketType;
@ -1064,7 +1083,7 @@ mod tests {
);
path.set_primary(true);
Self {
lr: LossRecovery::new(StatsCell::default()),
lr: LossRecovery::new(StatsCell::default(), FAST_PTO_SCALE),
path: Rc::new(RefCell::new(path)),
}
}

View file

@ -71,7 +71,7 @@ impl RttEstimate {
pub fn update(
&mut self,
mut qlog: &mut NeqoQlog,
qlog: &mut NeqoQlog,
mut rtt_sample: Duration,
ack_delay: Duration,
confirmed: bool,
@ -114,7 +114,7 @@ impl RttEstimate {
self.rttvar
);
qlog::metrics_updated(
&mut qlog,
qlog,
&[
QlogMetric::LatestRtt(self.latest_rtt),
QlogMetric::MinRtt(self.min_rtt),

View file

@ -20,7 +20,6 @@ use crate::addr_valid::{AddressValidation, AddressValidationResult};
use crate::cid::{ConnectionId, ConnectionIdDecoder, ConnectionIdGenerator, ConnectionIdRef};
use crate::connection::{Connection, Output, State};
use crate::packet::{PacketBuilder, PacketType, PublicPacket};
use crate::tparams::PreferredAddress;
use crate::{ConnectionParameters, QuicVersion, Res};
use std::cell::RefCell;
@ -158,8 +157,6 @@ pub struct Server {
zero_rtt_checker: ServerZeroRttChecker,
/// A connection ID generator.
cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>,
/// The preferred address(es).
preferred_address: Option<PreferredAddress>,
/// Connection parameters.
conn_params: ConnectionParameters,
/// Active connection attempts, keyed by `AttemptKey`. Initial packets with
@ -210,7 +207,6 @@ impl Server {
anti_replay,
zero_rtt_checker: ServerZeroRttChecker::new(zero_rtt_checker),
cid_generator,
preferred_address: None,
conn_params,
active_attempts: HashMap::default(),
connections: Rc::default(),
@ -239,11 +235,6 @@ impl Server {
self.ciphers = Vec::from(ciphers.as_ref());
}
/// Set a preferred address.
pub fn set_preferred_address(&mut self, spa: PreferredAddress) {
self.preferred_address = Some(spa);
}
pub fn enable_ech(
&mut self,
config: u8,

View file

@ -467,6 +467,13 @@ impl Streams {
.remote()
.get_integer(tparams::INITIAL_MAX_DATA),
);
if self.local_stream_limits[StreamType::BiDi].available() > 0 {
self.events.send_stream_creatable(StreamType::BiDi);
}
if self.local_stream_limits[StreamType::UniDi].available() > 0 {
self.events.send_stream_creatable(StreamType::UniDi);
}
}
pub fn handle_max_streams(&mut self, stream_type: StreamType, maximum_streams: u64) {