Bug 1889691 - Update to Neqo 0.7.3 r=necko-reviewers,valentin
Differential Revision: https://phabricator.services.mozilla.com/D206884
parent 24c1c6b41a
commit 245ce0bcb1
67 changed files with 910 additions and 1346 deletions
@@ -85,9 +85,9 @@ git = "https://github.com/mozilla/mp4parse-rust"
rev = "a138e40ec1c603615873e524b5b22e11c0ec4820"
replace-with = "vendored-sources"

[source."git+https://github.com/mozilla/neqo?tag=v0.7.2"]
[source."git+https://github.com/mozilla/neqo?tag=v0.7.3"]
git = "https://github.com/mozilla/neqo"
tag = "v0.7.2"
tag = "v0.7.3"
replace-with = "vendored-sources"

[source."git+https://github.com/mozilla/uniffi-rs.git?rev=afb29ebdc1d9edf15021b1c5332fc9f285bbe13b"]
Cargo.lock (generated, 21 changes)
@@ -3947,8 +3947,8 @@ dependencies = [

[[package]]
name = "neqo-common"
version = "0.7.2"
source = "git+https://github.com/mozilla/neqo?tag=v0.7.2#ce5cbe4dfc2e38b238abb022c39eee4215058221"
version = "0.7.3"
source = "git+https://github.com/mozilla/neqo?tag=v0.7.3#1dc8ea33e27b65a0294ff3502204285358fc4a77"
dependencies = [
"enum-map",
"env_logger",

@@ -3960,13 +3960,14 @@ dependencies = [

[[package]]
name = "neqo-crypto"
version = "0.7.2"
source = "git+https://github.com/mozilla/neqo?tag=v0.7.2#ce5cbe4dfc2e38b238abb022c39eee4215058221"
version = "0.7.3"
source = "git+https://github.com/mozilla/neqo?tag=v0.7.3#1dc8ea33e27b65a0294ff3502204285358fc4a77"
dependencies = [
"bindgen 0.69.4",
"log",
"mozbuild",
"neqo-common",
"semver",
"serde",
"serde_derive",
"toml",

@@ -3974,8 +3975,8 @@ dependencies = [

[[package]]
name = "neqo-http3"
version = "0.7.2"
source = "git+https://github.com/mozilla/neqo?tag=v0.7.2#ce5cbe4dfc2e38b238abb022c39eee4215058221"
version = "0.7.3"
source = "git+https://github.com/mozilla/neqo?tag=v0.7.3#1dc8ea33e27b65a0294ff3502204285358fc4a77"
dependencies = [
"enumset",
"log",

@@ -3991,8 +3992,8 @@ dependencies = [

[[package]]
name = "neqo-qpack"
version = "0.7.2"
source = "git+https://github.com/mozilla/neqo?tag=v0.7.2#ce5cbe4dfc2e38b238abb022c39eee4215058221"
version = "0.7.3"
source = "git+https://github.com/mozilla/neqo?tag=v0.7.3#1dc8ea33e27b65a0294ff3502204285358fc4a77"
dependencies = [
"log",
"neqo-common",

@@ -4004,8 +4005,8 @@ dependencies = [

[[package]]
name = "neqo-transport"
version = "0.7.2"
source = "git+https://github.com/mozilla/neqo?tag=v0.7.2#ce5cbe4dfc2e38b238abb022c39eee4215058221"
version = "0.7.3"
source = "git+https://github.com/mozilla/neqo?tag=v0.7.3#1dc8ea33e27b65a0294ff3502204285358fc4a77"
dependencies = [
"indexmap 1.9.3",
"log",
@@ -9,10 +9,10 @@ license = "MPL-2.0"
name = "neqo_glue"

[dependencies]
neqo-http3 = { tag = "v0.7.2", git = "https://github.com/mozilla/neqo" }
neqo-transport = { tag = "v0.7.2", git = "https://github.com/mozilla/neqo" }
neqo-common = { tag = "v0.7.2", git = "https://github.com/mozilla/neqo" }
neqo-qpack = { tag = "v0.7.2", git = "https://github.com/mozilla/neqo" }
neqo-http3 = { tag = "v0.7.3", git = "https://github.com/mozilla/neqo" }
neqo-transport = { tag = "v0.7.3", git = "https://github.com/mozilla/neqo" }
neqo-common = { tag = "v0.7.3", git = "https://github.com/mozilla/neqo" }
neqo-qpack = { tag = "v0.7.3", git = "https://github.com/mozilla/neqo" }
nserror = { path = "../../../xpcom/rust/nserror" }
nsstring = { path = "../../../xpcom/rust/nsstring" }
xpcom = { path = "../../../xpcom/rust/xpcom" }

@@ -27,10 +27,10 @@ uuid = { version = "1.0", features = ["v4"] }
winapi = {version = "0.3", features = ["ws2def"] }

[dependencies.neqo-crypto]
tag = "v0.7.2"
tag = "v0.7.3"
git = "https://github.com/mozilla/neqo"
default-features = false
features = ["gecko"]

[features]
fuzzing = ["neqo-http3/fuzzing"]
fuzzing = ["neqo-http3/disable-encryption"]
@@ -121,7 +121,7 @@ impl NeqoHttp3Conn {
max_accumlated_time_ms: u32,
) -> Result<RefPtr<NeqoHttp3Conn>, nsresult> {
// Nss init.
init();
init().map_err(|_| NS_ERROR_UNEXPECTED)?;

let origin_conv = str::from_utf8(origin).map_err(|_| NS_ERROR_INVALID_ARG)?;
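Context for the hunk above: neqo-crypto's init() is now fallible, so the glue code maps a failure to an nsresult instead of asserting. A minimal sketch of that pattern, with hypothetical stand-ins for the Gecko types (not the real nserror/xpcom bindings):

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct Nsresult(u32); // hypothetical stand-in for nserror::nsresult
const NS_ERROR_UNEXPECTED: Nsresult = Nsresult(0x8000_ffff);

// Stand-in for neqo_crypto::init(), which now returns a Result.
fn init() -> Result<(), String> {
    Ok(())
}

// Mirrors the neqo_glue change: an init failure becomes an error code.
fn new_conn() -> Result<(), Nsresult> {
    init().map_err(|_| NS_ERROR_UNEXPECTED)?;
    Ok(())
}

fn main() {
    assert!(new_conn().is_ok());
}
```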
@@ -6,10 +6,10 @@ edition = "2018"
license = "MPL-2.0"

[dependencies]
neqo-transport = { tag = "v0.7.2", git = "https://github.com/mozilla/neqo" }
neqo-common = { tag = "v0.7.2", git = "https://github.com/mozilla/neqo" }
neqo-http3 = { tag = "v0.7.2", git = "https://github.com/mozilla/neqo" }
neqo-qpack = { tag = "v0.7.2", git = "https://github.com/mozilla/neqo" }
neqo-transport = { tag = "v0.7.3", git = "https://github.com/mozilla/neqo" }
neqo-common = { tag = "v0.7.3", git = "https://github.com/mozilla/neqo" }
neqo-http3 = { tag = "v0.7.3", git = "https://github.com/mozilla/neqo" }
neqo-qpack = { tag = "v0.7.3", git = "https://github.com/mozilla/neqo" }
mio = "0.6.17"
mio-extras = "2.0.5"
log = "0.4.0"

@@ -21,7 +21,7 @@ tokio = { version = "1", features = ["rt-multi-thread"] }
mozilla-central-workspace-hack = { version = "0.1", features = ["http3server"], optional = true }

[dependencies.neqo-crypto]
tag = "v0.7.2"
tag = "v0.7.3"
git = "https://github.com/mozilla/neqo"
default-features = false
features = ["gecko"]
@@ -1378,7 +1378,7 @@ fn main() -> Result<(), io::Error> {
}
});

init_db(PathBuf::from(args[1].clone()));
init_db(PathBuf::from(args[1].clone())).unwrap();

let mut servers_runner = ServersRunner::new()?;
servers_runner.init();
@@ -1 +1 @@
{"files":{"Cargo.toml":"b49758e5e8f0a6955d761e689be39530f193f7089de07f2295a7a3aef4df5898","build.rs":"306b2f909a25ae38daf5404a4e128d2a94e8975b70870864c2a71cafec9717c7","src/codec.rs":"fd239f75d374db6ff744211344c82bcd19ecf753e07410e1fe37732bbb81dfe9","src/datagram.rs":"f2ff56faa0e513edbf4331b6ee2c9e6d6111483bda7aff08d16b9f05bce5c320","src/event.rs":"106ca6c4afb107fa49a1bc72f5eb4ae95f4baa1ba19736aa38c8ba973774c160","src/header.rs":"467b947f78bfe354d8bb51e8df0c2be69e75a45e2be688d81f0d268aa77c89ef","src/hrtime.rs":"112dc758e65301b8a7a508b125d3d61063180d432bffaec566a050d4f907ab18","src/incrdecoder.rs":"577c32b9ace51f2daaf940be6d0c391c4f55cd42ef6848c68c1ffc970d8c57b5","src/lib.rs":"a86aae69900933bf83044fa96166ee51216277415eafcdb15c04a907bb2dd10e","src/log.rs":"7246053bffd704b264d42fc82f986b9d62079472a76a9fc3749c25cfc7698532","src/qlog.rs":"9b081f32bf158fd340300693acc97fe0554b617ae664eba86e4d3572e2b1e16e","src/timer.rs":"350a730cc5a159dfdac5d78ec8e8a34c5172a476d827a566703edec24c791842","src/tos.rs":"440616cb0aee9082abe00623b33e68dbe80eda47aec889ac5f4145b1566bf692","src/udp.rs":"2b92132e078791e35b66f68d99d79ff5df55efd03e788474f7781a00403a5533","tests/log.rs":"a11e21fb570258ca93bb40e3923817d381e1e605accbc3aed1df5a0a9918b41d"},"package":null}
{"files":{"Cargo.toml":"a5ff5210d8d2f1210eff53a0c3f9b9f930d9259b83d58b3c2104665f722d5e2c","build.rs":"306b2f909a25ae38daf5404a4e128d2a94e8975b70870864c2a71cafec9717c7","src/codec.rs":"fd239f75d374db6ff744211344c82bcd19ecf753e07410e1fe37732bbb81dfe9","src/datagram.rs":"691ad94a3618d6bf5202a7911419b5e75e318d09c8cc57a9a542a864dcc764ec","src/event.rs":"106ca6c4afb107fa49a1bc72f5eb4ae95f4baa1ba19736aa38c8ba973774c160","src/header.rs":"467b947f78bfe354d8bb51e8df0c2be69e75a45e2be688d81f0d268aa77c89ef","src/hrtime.rs":"112dc758e65301b8a7a508b125d3d61063180d432bffaec566a050d4f907ab18","src/incrdecoder.rs":"577c32b9ace51f2daaf940be6d0c391c4f55cd42ef6848c68c1ffc970d8c57b5","src/lib.rs":"32b1902796d06129eb66b16195e45eed8a95ae79017eda72c2c347e232645126","src/log.rs":"6ed99e15707c4256ae793011ed2f4b33aa81fed70205aaf5f8d3cd11ad451cf0","src/qlog.rs":"9b081f32bf158fd340300693acc97fe0554b617ae664eba86e4d3572e2b1e16e","src/tos.rs":"baec87b4f8a6253b88cd257730bd1e3147c046ef993288b08235d54a24f88fbe","tests/log.rs":"a11e21fb570258ca93bb40e3923817d381e1e605accbc3aed1df5a0a9918b41d"},"package":null}
third_party/rust/neqo-common/Cargo.toml (vendored, 24 changes)
@@ -13,7 +13,7 @@
edition = "2021"
rust-version = "1.74.0"
name = "neqo-common"
version = "0.7.2"
version = "0.7.3"
authors = ["The Neqo Authors <necko@mozilla.com>"]
build = "build.rs"
homepage = "https://github.com/mozilla/neqo/"

@@ -39,26 +39,14 @@ default-features = false
version = "0.12"
default-features = false

[dependencies.quinn-udp]
git = "https://github.com/quinn-rs/quinn/"
rev = "a947962131aba8a6521253d03cc948b20098a2d6"
optional = true

[dependencies.time]
version = "0.3"
features = ["formatting"]
default-features = false

[dependencies.tokio]
version = "1"
features = [
"net",
"time",
"macros",
"rt",
"rt-multi-thread",
]
optional = true
[dev-dependencies.criterion]
version = "0.5"
features = ["html_reports"]
default-features = false

[dev-dependencies.test-fixture]

@@ -66,10 +54,6 @@ path = "../test-fixture"

[features]
ci = []
udp = [
"dep:quinn-udp",
"dep:tokio",
]

[target."cfg(windows)".dependencies.winapi]
version = "0.3"
third_party/rust/neqo-common/src/datagram.rs (vendored, 15 changes)
@@ -54,10 +54,8 @@ impl Datagram {
self.ttl
}

#[cfg(feature = "udp")]
#[must_use]
pub(crate) fn into_data(self) -> Vec<u8> {
self.d
pub fn set_tos(&mut self, tos: IpTos) {
self.tos = tos;
}
}

@@ -83,6 +81,12 @@ impl std::fmt::Debug for Datagram {
}
}

impl From<Datagram> for Vec<u8> {
fn from(datagram: Datagram) -> Self {
datagram.d
}
}

#[cfg(test)]
use test_fixture::datagram;

@@ -90,8 +94,7 @@ use test_fixture::datagram;
fn fmt_datagram() {
let d = datagram([0; 1].to_vec());
assert_eq!(
format!("{d:?}"),
&format!("{d:?}"),
"Datagram IpTos(Cs0, NotEct) TTL Some(128) [fe80::1]:443->[fe80::1]:443: [1]: 00"
.to_string()
);
}
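The datagram.rs hunks above drop the udp-only into_data() helper in favour of a public From<Datagram> for Vec<u8> conversion and add set_tos(). A small self-contained sketch of the new surface (simplified Datagram; the real type also carries addresses and a TTL):

```rust
// Minimal sketch mirroring the new datagram.rs API surface shown above.
#[derive(Clone, Copy, Debug, Default, PartialEq)]
struct IpTos(u8);

struct Datagram {
    d: Vec<u8>,
    tos: IpTos,
}

impl Datagram {
    // New mutator: adjust the TOS byte in place.
    fn set_tos(&mut self, tos: IpTos) {
        self.tos = tos;
    }
}

// Replaces the removed `into_data()`: callers now convert via `From`.
impl From<Datagram> for Vec<u8> {
    fn from(datagram: Datagram) -> Self {
        datagram.d
    }
}

fn main() {
    let mut dg = Datagram { d: vec![0x00], tos: IpTos(0) };
    dg.set_tos(IpTos(0b10)); // e.g. ECT(0) in the ECN bits
    let payload: Vec<u8> = dg.into(); // instead of dg.into_data()
    assert_eq!(payload, vec![0x00]);
}
```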
third_party/rust/neqo-common/src/lib.rs (vendored, 3 changes)
@@ -14,10 +14,7 @@ pub mod hrtime;
mod incrdecoder;
pub mod log;
pub mod qlog;
pub mod timer;
pub mod tos;
#[cfg(feature = "udp")]
pub mod udp;

use std::fmt::Write;
third_party/rust/neqo-common/src/log.rs (vendored, 21 changes)
@@ -50,7 +50,7 @@ fn since_start() -> Duration {
START_TIME.get_or_init(Instant::now).elapsed()
}

pub fn init() {
pub fn init(level_filter: Option<log::LevelFilter>) {
static INIT_ONCE: Once = Once::new();

if ::log::STATIC_MAX_LEVEL == ::log::LevelFilter::Off {

@@ -59,6 +59,9 @@ pub fn init() {

INIT_ONCE.call_once(|| {
let mut builder = Builder::from_env("RUST_LOG");
if let Some(filter) = level_filter {
builder.filter_level(filter);
}
builder.format(|buf, record| {
let elapsed = since_start();
writeln!(

@@ -71,9 +74,9 @@ pub fn init() {
)
});
if let Err(e) = builder.try_init() {
do_log!(::log::Level::Info, "Logging initialization error {:?}", e);
do_log!(::log::Level::Warn, "Logging initialization error {:?}", e);
} else {
do_log!(::log::Level::Info, "Logging initialized");
do_log!(::log::Level::Debug, "Logging initialized");
}
});
}

@@ -81,32 +84,32 @@ pub fn init() {
#[macro_export]
macro_rules! log_invoke {
($lvl:expr, $ctx:expr, $($arg:tt)*) => ( {
::neqo_common::log::init();
::neqo_common::log::init(None);
::neqo_common::do_log!($lvl, "[{}] {}", $ctx, format!($($arg)*));
} )
}
#[macro_export]
macro_rules! qerror {
([$ctx:expr], $($arg:tt)*) => (::neqo_common::log_invoke!(::log::Level::Error, $ctx, $($arg)*););
($($arg:tt)*) => ( { ::neqo_common::log::init(); ::neqo_common::do_log!(::log::Level::Error, $($arg)*); } );
($($arg:tt)*) => ( { ::neqo_common::log::init(None); ::neqo_common::do_log!(::log::Level::Error, $($arg)*); } );
}
#[macro_export]
macro_rules! qwarn {
([$ctx:expr], $($arg:tt)*) => (::neqo_common::log_invoke!(::log::Level::Warn, $ctx, $($arg)*););
($($arg:tt)*) => ( { ::neqo_common::log::init(); ::neqo_common::do_log!(::log::Level::Warn, $($arg)*); } );
($($arg:tt)*) => ( { ::neqo_common::log::init(None); ::neqo_common::do_log!(::log::Level::Warn, $($arg)*); } );
}
#[macro_export]
macro_rules! qinfo {
([$ctx:expr], $($arg:tt)*) => (::neqo_common::log_invoke!(::log::Level::Info, $ctx, $($arg)*););
($($arg:tt)*) => ( { ::neqo_common::log::init(); ::neqo_common::do_log!(::log::Level::Info, $($arg)*); } );
($($arg:tt)*) => ( { ::neqo_common::log::init(None); ::neqo_common::do_log!(::log::Level::Info, $($arg)*); } );
}
#[macro_export]
macro_rules! qdebug {
([$ctx:expr], $($arg:tt)*) => (::neqo_common::log_invoke!(::log::Level::Debug, $ctx, $($arg)*););
($($arg:tt)*) => ( { ::neqo_common::log::init(); ::neqo_common::do_log!(::log::Level::Debug, $($arg)*); } );
($($arg:tt)*) => ( { ::neqo_common::log::init(None); ::neqo_common::do_log!(::log::Level::Debug, $($arg)*); } );
}
#[macro_export]
macro_rules! qtrace {
([$ctx:expr], $($arg:tt)*) => (::neqo_common::log_invoke!(::log::Level::Trace, $ctx, $($arg)*););
($($arg:tt)*) => ( { ::neqo_common::log::init(); ::neqo_common::do_log!(::log::Level::Trace, $($arg)*); } );
($($arg:tt)*) => ( { ::neqo_common::log::init(None); ::neqo_common::do_log!(::log::Level::Trace, $($arg)*); } );
}
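With the new signature, log::init takes an optional level filter that overrides the env_logger default, while passing None keeps the previous RUST_LOG-driven behaviour. A short usage sketch (assumes the neqo_common and log crates as dependencies):

```rust
use neqo_common::qdebug;

fn main() {
    // Some(filter) forces a level; None relies on RUST_LOG as before.
    neqo_common::log::init(Some(log::LevelFilter::Debug));
    qdebug!("logging is set up");
}
```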
third_party/rust/neqo-common/src/timer.rs (vendored, 396 changes)
@@ -1,396 +0,0 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use std::{
mem,
time::{Duration, Instant},
};

/// Internal structure for a timer item.
struct TimerItem<T> {
time: Instant,
item: T,
}

impl<T> TimerItem<T> {
fn time(ti: &Self) -> Instant {
ti.time
}
}

/// A timer queue.
/// This uses a classic timer wheel arrangement, with some characteristics that might be considered
/// peculiar. Each slot in the wheel is sorted (complexity O(N) insertions, but O(logN) to find cut
/// points). Time is relative, the wheel has an origin time and it is unable to represent times that
/// are more than `granularity * capacity` past that time.
pub struct Timer<T> {
items: Vec<Vec<TimerItem<T>>>,
now: Instant,
granularity: Duration,
cursor: usize,
}

impl<T> Timer<T> {
/// Construct a new wheel at the given granularity, starting at the given time.
///
/// # Panics
///
/// When `capacity` is too large to fit in `u32` or `granularity` is zero.
pub fn new(now: Instant, granularity: Duration, capacity: usize) -> Self {
assert!(u32::try_from(capacity).is_ok());
assert!(granularity.as_nanos() > 0);
let mut items = Vec::with_capacity(capacity);
items.resize_with(capacity, Default::default);
Self {
items,
now,
granularity,
cursor: 0,
}
}

/// Return a reference to the time of the next entry.
#[must_use]
pub fn next_time(&self) -> Option<Instant> {
for i in 0..self.items.len() {
let idx = self.bucket(i);
if let Some(t) = self.items[idx].first() {
return Some(t.time);
}
}
None
}

/// Get the full span of time that this can cover.
/// Two timers cannot be more than this far apart.
/// In practice, this value is less by one amount of the timer granularity.
#[inline]
#[allow(clippy::cast_possible_truncation)] // guarded by assertion
#[must_use]
pub fn span(&self) -> Duration {
self.granularity * (self.items.len() as u32)
}

/// For the given `time`, get the number of whole buckets in the future that is.
#[inline]
#[allow(clippy::cast_possible_truncation)] // guarded by assertion
fn delta(&self, time: Instant) -> usize {
// This really should use Duration::div_duration_f??(), but it can't yet.
((time - self.now).as_nanos() / self.granularity.as_nanos()) as usize
}

#[inline]
fn time_bucket(&self, time: Instant) -> usize {
self.bucket(self.delta(time))
}

#[inline]
fn bucket(&self, delta: usize) -> usize {
debug_assert!(delta < self.items.len());
(self.cursor + delta) % self.items.len()
}

/// Slide forward in time by `n * self.granularity`.
#[allow(clippy::cast_possible_truncation, clippy::reversed_empty_ranges)]
// cast_possible_truncation is ok because we have an assertion guard.
// reversed_empty_ranges is to avoid different types on the if/else.
fn tick(&mut self, n: usize) {
let new = self.bucket(n);
let iter = if new < self.cursor {
(self.cursor..self.items.len()).chain(0..new)
} else {
(self.cursor..new).chain(0..0)
};
for i in iter {
assert!(self.items[i].is_empty());
}
self.now += self.granularity * (n as u32);
self.cursor = new;
}

/// Asserts if the time given is in the past or too far in the future.
///
/// # Panics
///
/// When `time` is in the past relative to previous calls.
pub fn add(&mut self, time: Instant, item: T) {
assert!(time >= self.now);
// Skip forward quickly if there is too large a gap.
let short_span = self.span() - self.granularity;
if time >= (self.now + self.span() + short_span) {
// Assert that there aren't any items.
for i in &self.items {
debug_assert!(i.is_empty());
}
self.now = time.checked_sub(short_span).unwrap();
self.cursor = 0;
}

// Adjust time forward the minimum amount necessary.
let mut d = self.delta(time);
if d >= self.items.len() {
self.tick(1 + d - self.items.len());
d = self.items.len() - 1;
}

let bucket = self.bucket(d);
let ins = match self.items[bucket].binary_search_by_key(&time, TimerItem::time) {
Ok(j) | Err(j) => j,
};
self.items[bucket].insert(ins, TimerItem { time, item });
}

/// Given knowledge of the time an item was added, remove it.
/// This requires use of a predicate that identifies matching items.
pub fn remove<F>(&mut self, time: Instant, mut selector: F) -> Option<T>
where
F: FnMut(&T) -> bool,
{
if time < self.now {
return None;
}
if time > self.now + self.span() {
return None;
}
let bucket = self.time_bucket(time);
let Ok(start_index) = self.items[bucket].binary_search_by_key(&time, TimerItem::time)
else {
return None;
};
// start_index is just one of potentially many items with the same time.
// Search backwards for a match, ...
for i in (0..=start_index).rev() {
if self.items[bucket][i].time != time {
break;
}
if selector(&self.items[bucket][i].item) {
return Some(self.items[bucket].remove(i).item);
}
}
// ... then forwards.
for i in (start_index + 1)..self.items[bucket].len() {
if self.items[bucket][i].time != time {
break;
}
if selector(&self.items[bucket][i].item) {
return Some(self.items[bucket].remove(i).item);
}
}
None
}

/// Take the next item, unless there are no items with
/// a timeout in the past relative to `until`.
pub fn take_next(&mut self, until: Instant) -> Option<T> {
for i in 0..self.items.len() {
let idx = self.bucket(i);
if !self.items[idx].is_empty() && self.items[idx][0].time <= until {
return Some(self.items[idx].remove(0).item);
}
}
None
}

/// Create an iterator that takes all items until the given time.
/// Note: Items might be removed even if the iterator is not fully exhausted.
pub fn take_until(&mut self, until: Instant) -> impl Iterator<Item = T> {
let get_item = move |x: TimerItem<T>| x.item;
if until >= self.now + self.span() {
// Drain everything, so a clean sweep.
let mut empty_items = Vec::with_capacity(self.items.len());
empty_items.resize_with(self.items.len(), Vec::default);
let mut items = mem::replace(&mut self.items, empty_items);
self.now = until;
self.cursor = 0;

let tail = items.split_off(self.cursor);
return tail.into_iter().chain(items).flatten().map(get_item);
}

// Only returning a partial span, so do it bucket at a time.
let delta = self.delta(until);
let mut buckets = Vec::with_capacity(delta + 1);

// First, the whole buckets.
for i in 0..delta {
let idx = self.bucket(i);
buckets.push(mem::take(&mut self.items[idx]));
}
self.tick(delta);

// Now we need to split the last bucket, because there might be
// some items with `item.time > until`.
let bucket = &mut self.items[self.cursor];
let last_idx = match bucket.binary_search_by_key(&until, TimerItem::time) {
Ok(mut m) => {
// If there are multiple values, the search will hit any of them.
// Make sure to get them all.
while m < bucket.len() && bucket[m].time == until {
m += 1;
}
m
}
Err(ins) => ins,
};
let tail = bucket.split_off(last_idx);
buckets.push(mem::replace(bucket, tail));
// This tomfoolery with the empty vector ensures that
// the returned type here matches the one above precisely
// without having to invoke the `either` crate.
buckets.into_iter().chain(vec![]).flatten().map(get_item)
}
}

#[cfg(test)]
mod test {
use std::sync::OnceLock;

use super::{Duration, Instant, Timer};

fn now() -> Instant {
static NOW: OnceLock<Instant> = OnceLock::new();
*NOW.get_or_init(Instant::now)
}

const GRANULARITY: Duration = Duration::from_millis(10);
const CAPACITY: usize = 10;
#[test]
fn create() {
let t: Timer<()> = Timer::new(now(), GRANULARITY, CAPACITY);
assert_eq!(t.span(), Duration::from_millis(100));
assert_eq!(None, t.next_time());
}

#[test]
fn immediate_entry() {
let mut t = Timer::new(now(), GRANULARITY, CAPACITY);
t.add(now(), 12);
assert_eq!(now(), t.next_time().expect("should have an entry"));
let values: Vec<_> = t.take_until(now()).collect();
assert_eq!(vec![12], values);
}

#[test]
fn same_time() {
let mut t = Timer::new(now(), GRANULARITY, CAPACITY);
let v1 = 12;
let v2 = 13;
t.add(now(), v1);
t.add(now(), v2);
assert_eq!(now(), t.next_time().expect("should have an entry"));
let values: Vec<_> = t.take_until(now()).collect();
assert!(values.contains(&v1));
assert!(values.contains(&v2));
}

#[test]
fn add() {
let mut t = Timer::new(now(), GRANULARITY, CAPACITY);
let near_future = now() + Duration::from_millis(17);
let v = 9;
t.add(near_future, v);
assert_eq!(near_future, t.next_time().expect("should return a value"));
assert_eq!(
t.take_until(near_future.checked_sub(Duration::from_millis(1)).unwrap())
.count(),
0
);
assert!(t
.take_until(near_future + Duration::from_millis(1))
.any(|x| x == v));
}

#[test]
fn add_future() {
let mut t = Timer::new(now(), GRANULARITY, CAPACITY);
let future = now() + Duration::from_millis(117);
let v = 9;
t.add(future, v);
assert_eq!(future, t.next_time().expect("should return a value"));
assert!(t.take_until(future).any(|x| x == v));
}

#[test]
fn add_far_future() {
let mut t = Timer::new(now(), GRANULARITY, CAPACITY);
let far_future = now() + Duration::from_millis(892);
let v = 9;
t.add(far_future, v);
assert_eq!(far_future, t.next_time().expect("should return a value"));
assert!(t.take_until(far_future).any(|x| x == v));
}

const TIMES: &[Duration] = &[
Duration::from_millis(40),
Duration::from_millis(91),
Duration::from_millis(6),
Duration::from_millis(3),
Duration::from_millis(22),
Duration::from_millis(40),
];

fn with_times() -> Timer<usize> {
let mut t = Timer::new(now(), GRANULARITY, CAPACITY);
for (i, time) in TIMES.iter().enumerate() {
t.add(now() + *time, i);
}
assert_eq!(
now() + *TIMES.iter().min().unwrap(),
t.next_time().expect("should have a time")
);
t
}

#[test]
#[allow(clippy::needless_collect)] // false positive
fn multiple_values() {
let mut t = with_times();
let values: Vec<_> = t.take_until(now() + *TIMES.iter().max().unwrap()).collect();
for i in 0..TIMES.len() {
assert!(values.contains(&i));
}
}

#[test]
#[allow(clippy::needless_collect)] // false positive
fn take_far_future() {
let mut t = with_times();
let values: Vec<_> = t.take_until(now() + Duration::from_secs(100)).collect();
for i in 0..TIMES.len() {
assert!(values.contains(&i));
}
}

#[test]
fn remove_each() {
let mut t = with_times();
for (i, time) in TIMES.iter().enumerate() {
assert_eq!(Some(i), t.remove(now() + *time, |&x| x == i));
}
assert_eq!(None, t.next_time());
}

#[test]
fn remove_future() {
let mut t = Timer::new(now(), GRANULARITY, CAPACITY);
let future = now() + Duration::from_millis(117);
let v = 9;
t.add(future, v);

assert_eq!(Some(v), t.remove(future, |candidate| *candidate == v));
}

#[test]
fn remove_too_far_future() {
let mut t = Timer::new(now(), GRANULARITY, CAPACITY);
let future = now() + Duration::from_millis(117);
let too_far_future = now() + t.span() + Duration::from_millis(117);
let v = 9;
t.add(future, v);

assert_eq!(None, t.remove(too_far_future, |candidate| *candidate == v));
}
}
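For reference, the removed timer wheel places an entry due at now + d into bucket (cursor + d / granularity) % capacity, which is why it cannot represent times more than granularity * capacity past its origin. A tiny sketch of that arithmetic (not the deleted implementation itself):

```rust
use std::time::Duration;

// Sketch of the bucket calculation used by the removed timer wheel.
fn bucket(cursor: usize, capacity: usize, granularity: Duration, delay: Duration) -> usize {
    let delta = (delay.as_nanos() / granularity.as_nanos()) as usize;
    assert!(delta < capacity, "beyond the span the wheel can represent");
    (cursor + delta) % capacity
}

fn main() {
    // 10 ms granularity, 10 buckets => 100 ms span, matching the tests above.
    let g = Duration::from_millis(10);
    assert_eq!(bucket(0, 10, g, Duration::from_millis(17)), 1);
    assert_eq!(bucket(3, 10, g, Duration::from_millis(91)), 2); // wraps around
}
```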
third_party/rust/neqo-common/src/tos.rs (vendored, 48 changes)
@@ -36,7 +36,7 @@ impl From<IpTosEcn> for u8 {

impl From<u8> for IpTosEcn {
fn from(v: u8) -> Self {
match v & 0b11 {
match v & 0b0000_0011 {
0b00 => IpTosEcn::NotEct,
0b01 => IpTosEcn::Ect1,
0b10 => IpTosEcn::Ect0,

@@ -47,8 +47,8 @@ impl From<u8> for IpTosEcn {
}

impl From<IpTos> for IpTosEcn {
fn from(value: IpTos) -> Self {
IpTosEcn::from(value.0 & 0x3)
fn from(v: IpTos) -> Self {
IpTosEcn::from(u8::from(v))
}
}

@@ -166,14 +166,13 @@ impl From<u8> for IpTosDscp {
}

impl From<IpTos> for IpTosDscp {
fn from(value: IpTos) -> Self {
IpTosDscp::from(value.0 & 0xfc)
fn from(v: IpTos) -> Self {
IpTosDscp::from(u8::from(v))
}
}

/// The type-of-service field in an IP packet.
#[allow(clippy::module_name_repetitions)]
#[derive(Copy, Clone, PartialEq, Eq)]
#[derive(Copy, Clone, PartialEq, Eq, Default)]
pub struct IpTos(u8);

impl From<IpTosEcn> for IpTos {

@@ -215,15 +214,19 @@ impl From<u8> for IpTos {
impl Debug for IpTos {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_tuple("IpTos")
.field(&IpTosDscp::from(self.0 & 0xfc))
.field(&IpTosEcn::from(self.0 & 0x3))
.field(&IpTosDscp::from(*self))
.field(&IpTosEcn::from(*self))
.finish()
}
}

impl Default for IpTos {
fn default() -> Self {
(IpTosDscp::default(), IpTosEcn::default()).into()
impl IpTos {
pub fn set_ecn(&mut self, ecn: IpTosEcn) {
self.0 = u8::from(IpTosDscp::from(*self)) | u8::from(ecn);
}

pub fn set_dscp(&mut self, dscp: IpTosDscp) {
self.0 = u8::from(IpTosEcn::from(*self)) | u8::from(dscp);
}
}

@@ -322,4 +325,25 @@ mod tests {
assert_eq!(tos, u8::from(iptos));
assert_eq!(IpTos::from(tos), iptos);
}

#[test]
fn iptos_to_iptosdscp() {
let tos = IpTos::from((IpTosDscp::Af41, IpTosEcn::NotEct));
let dscp = IpTosDscp::from(tos);
assert_eq!(dscp, IpTosDscp::Af41);
}

#[test]
fn tos_modify_ecn() {
let mut iptos: IpTos = (IpTosDscp::Af41, IpTosEcn::NotEct).into();
iptos.set_ecn(IpTosEcn::Ce);
assert_eq!(u8::from(iptos), 0b1000_1011);
}

#[test]
fn tos_modify_dscp() {
let mut iptos: IpTos = (IpTosDscp::Af41, IpTosEcn::Ect1).into();
iptos.set_dscp(IpTosDscp::Le);
assert_eq!(u8::from(iptos), 0b0000_0101);
}
}
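The new tests follow the standard TOS byte layout: DSCP in the upper six bits, ECN in the lower two. A quick check of the expected constants (DSCP and ECN code points as defined in the IANA registries):

```rust
fn main() {
    // AF41 has DSCP code point 34; the DSCP field sits above the 2 ECN bits.
    let af41 = 34u8 << 2; // 0b1000_1000
    let ce = 0b11u8;      // Congestion Experienced
    assert_eq!(af41 | ce, 0b1000_1011); // matches tos_modify_ecn above

    // LE has DSCP code point 1; ECT(1) is 0b01.
    let le = 1u8 << 2;    // 0b0000_0100
    let ect1 = 0b01u8;
    assert_eq!(le | ect1, 0b0000_0101); // matches tos_modify_dscp above
}
```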
third_party/rust/neqo-common/src/udp.rs (vendored, 222 changes)
@@ -1,222 +0,0 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![allow(clippy::missing_errors_doc)] // Functions simply delegate to tokio and quinn-udp.
#![allow(clippy::missing_panics_doc)] // Functions simply delegate to tokio and quinn-udp.

use std::{
io::{self, IoSliceMut},
net::{SocketAddr, ToSocketAddrs},
slice,
};

use quinn_udp::{EcnCodepoint, RecvMeta, Transmit, UdpSocketState};
use tokio::io::Interest;

use crate::{Datagram, IpTos};

/// Socket receive buffer size.
///
/// Allows reading multiple datagrams in a single [`Socket::recv`] call.
const RECV_BUF_SIZE: usize = u16::MAX as usize;

pub struct Socket {
socket: tokio::net::UdpSocket,
state: UdpSocketState,
recv_buf: Vec<u8>,
}

impl Socket {
/// Calls [`std::net::UdpSocket::bind`] and instantiates [`quinn_udp::UdpSocketState`].
pub fn bind<A: ToSocketAddrs>(addr: A) -> Result<Self, io::Error> {
let socket = std::net::UdpSocket::bind(addr)?;

Ok(Self {
state: quinn_udp::UdpSocketState::new((&socket).into())?,
socket: tokio::net::UdpSocket::from_std(socket)?,
recv_buf: vec![0; RECV_BUF_SIZE],
})
}

/// See [`tokio::net::UdpSocket::local_addr`].
pub fn local_addr(&self) -> io::Result<SocketAddr> {
self.socket.local_addr()
}

/// See [`tokio::net::UdpSocket::writable`].
pub async fn writable(&self) -> Result<(), io::Error> {
self.socket.writable().await
}

/// See [`tokio::net::UdpSocket::readable`].
pub async fn readable(&self) -> Result<(), io::Error> {
self.socket.readable().await
}

/// Send the UDP datagram on the specified socket.
pub fn send(&self, d: Datagram) -> io::Result<usize> {
let transmit = Transmit {
destination: d.destination(),
ecn: EcnCodepoint::from_bits(Into::<u8>::into(d.tos())),
contents: d.into_data().into(),
segment_size: None,
src_ip: None,
};

let n = self.socket.try_io(Interest::WRITABLE, || {
self.state
.send((&self.socket).into(), slice::from_ref(&transmit))
})?;

assert_eq!(n, 1, "only passed one slice");

Ok(n)
}

/// Receive a UDP datagram on the specified socket.
pub fn recv(&mut self, local_address: &SocketAddr) -> Result<Vec<Datagram>, io::Error> {
let mut meta = RecvMeta::default();

match self.socket.try_io(Interest::READABLE, || {
self.state.recv(
(&self.socket).into(),
&mut [IoSliceMut::new(&mut self.recv_buf)],
slice::from_mut(&mut meta),
)
}) {
Ok(n) => {
assert_eq!(n, 1, "only passed one slice");
}
Err(ref err)
if err.kind() == io::ErrorKind::WouldBlock
|| err.kind() == io::ErrorKind::Interrupted =>
{
return Ok(vec![])
}
Err(err) => {
return Err(err);
}
};

if meta.len == 0 {
eprintln!("zero length datagram received?");
return Ok(vec![]);
}
if meta.len == self.recv_buf.len() {
eprintln!(
"Might have received more than {} bytes",
self.recv_buf.len()
);
}

Ok(self.recv_buf[0..meta.len]
.chunks(meta.stride.min(self.recv_buf.len()))
.map(|d| {
Datagram::new(
meta.addr,
*local_address,
meta.ecn.map(|n| IpTos::from(n as u8)).unwrap_or_default(),
None, // TODO: get the real TTL https://github.com/quinn-rs/quinn/issues/1749
d,
)
})
.collect())
}
}

#[cfg(test)]
mod tests {
use super::*;
use crate::{IpTosDscp, IpTosEcn};

#[tokio::test]
async fn datagram_tos() -> Result<(), io::Error> {
let sender = Socket::bind("127.0.0.1:0")?;
let receiver_addr: SocketAddr = "127.0.0.1:0".parse().unwrap();
let mut receiver = Socket::bind(receiver_addr)?;

let datagram = Datagram::new(
sender.local_addr()?,
receiver.local_addr()?,
IpTos::from((IpTosDscp::Le, IpTosEcn::Ect1)),
None,
"Hello, world!".as_bytes().to_vec(),
);

sender.writable().await?;
sender.send(datagram.clone())?;

receiver.readable().await?;
let received_datagram = receiver
.recv(&receiver_addr)
.expect("receive to succeed")
.into_iter()
.next()
.expect("receive to yield datagram");

// Assert that the ECN is correct.
assert_eq!(
IpTosEcn::from(datagram.tos()),
IpTosEcn::from(received_datagram.tos())
);

Ok(())
}

/// Expect [`Socket::recv`] to handle multiple [`Datagram`]s on GRO read.
#[tokio::test]
#[cfg_attr(not(any(target_os = "linux", target_os = "windows")), ignore)]
async fn many_datagrams_through_gro() -> Result<(), io::Error> {
const SEGMENT_SIZE: usize = 128;

let sender = Socket::bind("127.0.0.1:0")?;
let receiver_addr: SocketAddr = "127.0.0.1:0".parse().unwrap();
let mut receiver = Socket::bind(receiver_addr)?;

// `neqo_common::udp::Socket::send` does not yet
// (https://github.com/mozilla/neqo/issues/1693) support GSO. Use
// `quinn_udp` directly.
let max_gso_segments = sender.state.max_gso_segments();
let msg = vec![0xAB; SEGMENT_SIZE * max_gso_segments];
let transmit = Transmit {
destination: receiver.local_addr()?,
ecn: EcnCodepoint::from_bits(Into::<u8>::into(IpTos::from((
IpTosDscp::Le,
IpTosEcn::Ect1,
)))),
contents: msg.clone().into(),
segment_size: Some(SEGMENT_SIZE),
src_ip: None,
};
sender.writable().await?;
let n = sender.socket.try_io(Interest::WRITABLE, || {
sender
.state
.send((&sender.socket).into(), slice::from_ref(&transmit))
})?;
assert_eq!(n, 1, "only passed one slice");

// Allow for one GSO sendmmsg to result in multiple GRO recvmmsg.
let mut num_received = 0;
while num_received < max_gso_segments {
receiver.readable().await?;
receiver
.recv(&receiver_addr)
.expect("receive to succeed")
.into_iter()
.for_each(|d| {
assert_eq!(
SEGMENT_SIZE,
d.len(),
"Expect received datagrams to have same length as sent datagrams."
);
num_received += 1;
});
}

Ok(())
}
}
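The removed recv() path turned one GRO read into several datagrams by chunking the receive buffer at meta.stride. A minimal sketch of that slicing, independent of quinn-udp and tokio:

```rust
fn split_gro(buf: &[u8], len: usize, stride: usize) -> Vec<Vec<u8>> {
    // One oversized receive can hold several equal-sized UDP segments;
    // slicing at the stride recovers the individual datagrams.
    buf[..len].chunks(stride.max(1)).map(|seg| seg.to_vec()).collect()
}

fn main() {
    let buf = [0xAB_u8; 384];
    let segments = split_gro(&buf, buf.len(), 128);
    assert_eq!(segments.len(), 3);
    assert!(segments.iter().all(|s| s.len() == 128));
}
```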
@@ -1 +1 @@
{"files":{"Cargo.toml":"6f1917fbd4cbf53cb4883c30e8fcb9c20f8ebe15e19576c7d37cb6ba0ab9e42b","bindings/bindings.toml":"0660c1661318b8a5094834c2f1bb12266287ef467307f66947eff7762528f70a","bindings/mozpkix.hpp":"77072c8bb0f6eb6bfe8cbadc111dcd92e0c79936d13f2e501aae1e5d289a6675","bindings/nspr_err.h":"2d5205d017b536c2d838bcf9bc4ec79f96dd50e7bb9b73892328781f1ee6629d","bindings/nspr_error.h":"e41c03c77b8c22046f8618832c9569fbcc7b26d8b9bbc35eea7168f35e346889","bindings/nspr_io.h":"085b289849ef0e77f88512a27b4d9bdc28252bd4d39c6a17303204e46ef45f72","bindings/nspr_time.h":"2e637fd338a5cf0fd3fb0070a47f474a34c2a7f4447f31b6875f5a9928d0a261","bindings/nss_ciphers.h":"95ec6344a607558b3c5ba8510f463b6295f3a2fb3f538a01410531045a5f62d1","bindings/nss_init.h":"ef49045063782fb612aff459172cc6a89340f15005808608ade5320ca9974310","bindings/nss_p11.h":"0b81e64fe6db49b2ecff94edd850be111ef99ec11220e88ceb1c67be90143a78","bindings/nss_secerr.h":"713e8368bdae5159af7893cfa517dabfe5103cede051dee9c9557c850a2defc6","bindings/nss_ssl.h":"af222fb957b989e392e762fa2125c82608a0053aff4fb97e556691646c88c335","bindings/nss_sslerr.h":"24b97f092183d8486f774cdaef5030d0249221c78343570d83a4ee5b594210ae","bindings/nss_sslopt.h":"b7807eb7abdad14db6ad7bc51048a46b065a0ea65a4508c95a12ce90e59d1eea","build.rs":"21d9a0140b2afd708583f58f2af0a4ba93ab07ec088680b4cbf0e184aeb8785b","src/aead.rs":"8f50e4557b7829edb67f57c80c777c6ae23c868e2b2eeaaae0736af04dc0d298","src/aead_fuzzing.rs":"c3e590572314e0bb3fafa13dac3c831358b8a7b5570fe9cfe592752fce8cbdee","src/agent.rs":"e995e9cc5108470594bae1b0d4e4bc6b7a8ac2b66488f71ea99e2836c0edbd7e","src/agentio.rs":"c4cb1b3cd92ef53eb0b4fb0b34a597068d82d78ba470dae5821670a0f06c9cda","src/auth.rs":"ced1a18f691894984244088020ea25dc1ee678603317f0c7dfc8b8842fa750b4","src/cert.rs":"8942cb3ce25a61f92b6ffc30fb286052ed6f56eeda3be12fd46ea76ceba6c1cf","src/constants.rs":"f22bf16bd8cb539862cb1e47138dbba79e93fe738f4b907e465891326f98883c","src/ech.rs":"9d322fcc01c0886f1dfe9bb6273cb9f88a746452ac9a802761b1816a05930c1f","src/err.rs":"fca0222167883231a5e0a569a593f44214501819adf5aadf814be27891c87c24","src/exp.rs":"cec59d61fc95914f9703d2fb6490a8507af993c9db710dde894f2f8fd38123c7","src/ext.rs":"cbf7d9f5ecabf4b8c9efd6c334637ab1596ec5266d38ab8d2d6ceae305283deb","src/hkdf.rs":"ef32f20e30a9bd7f094199536d19c87c4231b7fbbe4a9c54c70e84ca9c6575be","src/hp.rs":"644f1bed67f1c6189a67c8d02ab3358aaa7f63af4b913dd7395becbc01a84291","src/lib.rs":"23732c7799be038c0e0835b54e7c40cf6c6536113e0adb6ae3b41b216a6e5220","src/p11.rs":"e8c366def0df470101f3d120dcc4391f74f921fe59e2f3db2a56832e2852b855","src/prio.rs":"e5e169296c0ac69919c59fb6c1f8bd6bf079452eaa13d75da0edd41d435d3f6f","src/replay.rs":"96b7af8eff9e14313e79303092018b12e8834f780c96b8e247c497fdc680c696","src/result.rs":"0587cbb6aace71a7f9765ef7c01dcd9f73a49dcc6331e1d8fe4de2aef6ca65b6","src/secrets.rs":"4ffaa66f25df47dadf042063bff5953effa7bf2f4920cafe827757d6a659cb58","src/selfencrypt.rs":"ac65b13f5bade9d03ab4709364f9ec937fa4ca009965c77ca73b481534a0a470","src/ssl.rs":"c83baa5518b81dd06f2e4072ea3c2d666ccdeb8b1ff6e3746eea9f1af47023a6","src/time.rs":"3b2829a98a1648eb052db19bb470808b6b015a1eca27ab7be64b5d196c0271c0","tests/aead.rs":"3ac4fe4ab79922b5d0191a9717058fc8d0710380ce9b25448095f870f511844f","tests/agent.rs":"824735f88e487a3748200844e9481e81a72163ad74d82faa9aa16594d9b9bb25","tests/ext.rs":"1b047d23d9b224ad06eb65d8f3a7b351e263774e404c79bbcbe8f43790e29c18","tests/handshake.rs":"e892a2839b31414be16e96cdf3b1a65978716094700c1a4989229f7edbf578a0","tests/hkdf.rs":"1d2098dc8398395864baf13e4886cfd1da6d36118727c3b264f457ee3da
6b048","tests/hp.rs":"b24fec53771c169be788772532d2617a5349196cf87d6444dc74214f7c73e92c","tests/init.rs":"44fe7626b75ab8c57adfee361bb70a83d5958797e1eb6c4531bb74988ba3a990","tests/selfencrypt.rs":"25813b0c6f32fc8383bb7685745feb750eb3fdc0a6a172a50d961c68d39f2a46"},"package":null}
{"files":{"Cargo.toml":"9414077b6a604ab5cf1bf64fa2f81860df8c5e1f31cb0a7fe5883dc1a1c9a706","bindings/bindings.toml":"29ec7a8ef3d5f1e4a632003e2d36c270e1caf12fd3fcf108a22d1893b90a41a6","bindings/nspr_err.h":"2d5205d017b536c2d838bcf9bc4ec79f96dd50e7bb9b73892328781f1ee6629d","bindings/nspr_error.h":"e41c03c77b8c22046f8618832c9569fbcc7b26d8b9bbc35eea7168f35e346889","bindings/nspr_io.h":"085b289849ef0e77f88512a27b4d9bdc28252bd4d39c6a17303204e46ef45f72","bindings/nspr_time.h":"2e637fd338a5cf0fd3fb0070a47f474a34c2a7f4447f31b6875f5a9928d0a261","bindings/nss_ciphers.h":"95ec6344a607558b3c5ba8510f463b6295f3a2fb3f538a01410531045a5f62d1","bindings/nss_init.h":"ef49045063782fb612aff459172cc6a89340f15005808608ade5320ca9974310","bindings/nss_p11.h":"0b81e64fe6db49b2ecff94edd850be111ef99ec11220e88ceb1c67be90143a78","bindings/nss_secerr.h":"713e8368bdae5159af7893cfa517dabfe5103cede051dee9c9557c850a2defc6","bindings/nss_ssl.h":"af222fb957b989e392e762fa2125c82608a0053aff4fb97e556691646c88c335","bindings/nss_sslerr.h":"24b97f092183d8486f774cdaef5030d0249221c78343570d83a4ee5b594210ae","bindings/nss_sslopt.h":"b7807eb7abdad14db6ad7bc51048a46b065a0ea65a4508c95a12ce90e59d1eea","build.rs":"cbf6a7d912314784c8c124cf7319c910a786d0e263f466843edd3f43826f036c","min_version.txt":"7e98f86c69cddb4f65cf96a6de1f4297e3ce224a4c4628609e29042b6c4dcfb9","src/aead.rs":"fc42bc20b84d2e5ccfd56271ae2d2db082e55586ea2926470c102da177f22296","src/aead_null.rs":"664f80bbb56d0abd3794b99cc927fd5f678ddb4ce95456001413ec18a6c6a6a9","src/agent.rs":"b12004faee4a136c10e8168848d397443b5927e9497edb62c72e6db3eb1c10a0","src/agentio.rs":"c4cb1b3cd92ef53eb0b4fb0b34a597068d82d78ba470dae5821670a0f06c9cda","src/auth.rs":"ced1a18f691894984244088020ea25dc1ee678603317f0c7dfc8b8842fa750b4","src/cert.rs":"8942cb3ce25a61f92b6ffc30fb286052ed6f56eeda3be12fd46ea76ceba6c1cf","src/constants.rs":"f22bf16bd8cb539862cb1e47138dbba79e93fe738f4b907e465891326f98883c","src/ech.rs":"9d322fcc01c0886f1dfe9bb6273cb9f88a746452ac9a802761b1816a05930c1f","src/err.rs":"ae979f334604aba89640c4491262641910033f0bd790d58671f649f5039b291c","src/exp.rs":"cec59d61fc95914f9703d2fb6490a8507af993c9db710dde894f2f8fd38123c7","src/ext.rs":"cbf7d9f5ecabf4b8c9efd6c334637ab1596ec5266d38ab8d2d6ceae305283deb","src/hkdf.rs":"ef32f20e30a9bd7f094199536d19c87c4231b7fbbe4a9c54c70e84ca9c6575be","src/hp.rs":"644f1bed67f1c6189a67c8d02ab3358aaa7f63af4b913dd7395becbc01a84291","src/lib.rs":"6b2d0eb2c55f6351d673d3a3e5fc5adac8d1030c67dae9af4c79552de0f57455","src/min_version.rs":"89b7ef6f9d2301db4f689f4d963b58375d577f705b92003a804048441e00cfd1","src/p11.rs":"e8c366def0df470101f3d120dcc4391f74f921fe59e2f3db2a56832e2852b855","src/prio.rs":"e5e169296c0ac69919c59fb6c1f8bd6bf079452eaa13d75da0edd41d435d3f6f","src/replay.rs":"96b7af8eff9e14313e79303092018b12e8834f780c96b8e247c497fdc680c696","src/result.rs":"0587cbb6aace71a7f9765ef7c01dcd9f73a49dcc6331e1d8fe4de2aef6ca65b6","src/secrets.rs":"4ffaa66f25df47dadf042063bff5953effa7bf2f4920cafe827757d6a659cb58","src/selfencrypt.rs":"b7cc1c896c7661c37461fc3a8bcbfdf2589433b907fa5f968ae4f6907704b441","src/ssl.rs":"c83baa5518b81dd06f2e4072ea3c2d666ccdeb8b1ff6e3746eea9f1af47023a6","src/time.rs":"3b2829a98a1648eb052db19bb470808b6b015a1eca27ab7be64b5d196c0271c0","tests/aead.rs":"e36ae77802df1ea6d17cfd1bd2178a3706089577d6fd1554ca86e748b8b235b9","tests/agent.rs":"824735f88e487a3748200844e9481e81a72163ad74d82faa9aa16594d9b9bb25","tests/ext.rs":"1b047d23d9b224ad06eb65d8f3a7b351e263774e404c79bbcbe8f43790e29c18","tests/handshake.rs":"e892a2839b31414be16e96cdf3b1a65978716094700c1a4989229f7edbf578
a0","tests/hkdf.rs":"1d2098dc8398395864baf13e4886cfd1da6d36118727c3b264f457ee3da6b048","tests/hp.rs":"b24fec53771c169be788772532d2617a5349196cf87d6444dc74214f7c73e92c","tests/init.rs":"616313cb38eac44b8c71a1d23a52a7d7b4c7c07d4c20dc9ea6600c3317f92613","tests/selfencrypt.rs":"8d10840b41629bf449a6b3a551377315e8a05ca26c6b041548748196652c5909"},"package":null}
third_party/rust/neqo-crypto/Cargo.toml (vendored, 8 changes)
@@ -13,7 +13,7 @@
edition = "2021"
rust-version = "1.74.0"
name = "neqo-crypto"
version = "0.7.2"
version = "0.7.3"
authors = ["The Neqo Authors <necko@mozilla.com>"]
build = "build.rs"
homepage = "https://github.com/mozilla/neqo/"

@@ -43,6 +43,10 @@ version = "0.1"
optional = true
default-features = false

[build-dependencies.semver]
version = "1.0"
default-features = false

[build-dependencies.serde]
version = "1.0"
default-features = false

@@ -56,7 +60,7 @@ version = "0.5"
default-features = false

[features]
fuzzing = []
disable-encryption = []
gecko = ["mozbuild"]

[lints.clippy.pedantic]
@@ -265,8 +265,3 @@ enums = [
[nspr_time]
types = ["PRTime"]
functions = ["PR_Now"]

[mozpkix]
cplusplus = true
types = ["mozilla::pkix::ErrorCode"]
enums = ["mozilla::pkix::ErrorCode"]
@@ -1 +0,0 @@
#include "mozpkix/pkixnss.h"
third_party/rust/neqo-crypto/build.rs (vendored, 105 changes)
@@ -12,8 +12,13 @@ use std::{
};

use bindgen::Builder;
use semver::{Version, VersionReq};
use serde_derive::Deserialize;

#[path = "src/min_version.rs"]
mod min_version;
use min_version::MINIMUM_NSS_VERSION;

const BINDINGS_DIR: &str = "bindings";
const BINDINGS_CONFIG: &str = "bindings.toml";

@@ -90,46 +95,6 @@ fn setup_clang() {
}
}

fn nss_dir() -> PathBuf {
let dir = if let Ok(dir) = env::var("NSS_DIR") {
let path = PathBuf::from(dir.trim());
assert!(
!path.is_relative(),
"The NSS_DIR environment variable is expected to be an absolute path."
);
path
} else {
let out_dir = env::var("OUT_DIR").unwrap();
let dir = Path::new(&out_dir).join("nss");
if !dir.exists() {
Command::new("hg")
.args([
"clone",
"https://hg.mozilla.org/projects/nss",
dir.to_str().unwrap(),
])
.status()
.expect("can't clone nss");
}
let nspr_dir = Path::new(&out_dir).join("nspr");
if !nspr_dir.exists() {
Command::new("hg")
.args([
"clone",
"https://hg.mozilla.org/projects/nspr",
nspr_dir.to_str().unwrap(),
])
.status()
.expect("can't clone nspr");
}
dir
};
assert!(dir.is_dir(), "NSS_DIR {dir:?} doesn't exist");
// Note that this returns a relative path because UNC
// paths on windows cause certain tools to explode.
dir
}

fn get_bash() -> PathBuf {
// If BASH is set, use that.
if let Ok(bash) = env::var("BASH") {

@@ -295,11 +260,63 @@ fn build_bindings(base: &str, bindings: &Bindings, flags: &[String], gecko: bool
.expect("couldn't write bindings");
}

fn setup_standalone() -> Vec<String> {
fn pkg_config() -> Vec<String> {
let modversion = Command::new("pkg-config")
.args(["--modversion", "nss"])
.output()
.expect("pkg-config reports NSS as absent")
.stdout;
let modversion = String::from_utf8(modversion).expect("non-UTF8 from pkg-config");
let modversion = modversion.trim();
// The NSS version number does not follow semver numbering, because it omits the patch version
// when that's 0. Deal with that.
let modversion_for_cmp = if modversion.chars().filter(|c| *c == '.').count() == 1 {
modversion.to_owned() + ".0"
} else {
modversion.to_owned()
};
let modversion_for_cmp =
Version::parse(&modversion_for_cmp).expect("NSS version not in semver format");
let version_req = VersionReq::parse(&format!(">={}", MINIMUM_NSS_VERSION.trim())).unwrap();
assert!(
version_req.matches(&modversion_for_cmp),
"neqo has NSS version requirement {version_req}, found {modversion}"
);

let cfg = Command::new("pkg-config")
.args(["--cflags", "--libs", "nss"])
.output()
.expect("NSS flags not returned by pkg-config")
.stdout;
let cfg_str = String::from_utf8(cfg).expect("non-UTF8 from pkg-config");

let mut flags: Vec<String> = Vec::new();
for f in cfg_str.split(' ') {
if let Some(include) = f.strip_prefix("-I") {
flags.push(String::from(f));
println!("cargo:include={include}");
} else if let Some(path) = f.strip_prefix("-L") {
println!("cargo:rustc-link-search=native={path}");
} else if let Some(lib) = f.strip_prefix("-l") {
println!("cargo:rustc-link-lib=dylib={lib}");
} else {
println!("Warning: Unknown flag from pkg-config: {f}");
}
}

flags
}

fn setup_standalone(nss: &str) -> Vec<String> {
setup_clang();

println!("cargo:rerun-if-env-changed=NSS_DIR");
let nss = nss_dir();
let nss = PathBuf::from(nss);
assert!(
!nss.is_relative(),
"The NSS_DIR environment variable is expected to be an absolute path."
);

build_nss(nss.clone());

// $NSS_DIR/../dist/

@@ -406,8 +423,10 @@ fn setup_for_gecko() -> Vec<String> {
fn main() {
let flags = if cfg!(feature = "gecko") {
setup_for_gecko()
} else if let Ok(nss_dir) = env::var("NSS_DIR") {
setup_standalone(nss_dir.trim())
} else {
setup_standalone()
pkg_config()
};

let config_file = PathBuf::from(BINDINGS_DIR).join(BINDINGS_CONFIG);
third_party/rust/neqo-crypto/min_version.txt (vendored, new file, 1 change)
@@ -0,0 +1 @@
3.98
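min_version.txt holds the two-component NSS version that build.rs normalizes to x.y.0 before the semver comparison shown above. A sketch of that normalization (using the semver crate, as the build script does):

```rust
use semver::{Version, VersionReq};

fn normalize(nss: &str) -> String {
    // NSS reports "3.98" when the patch level is 0; semver needs three parts.
    if nss.matches('.').count() == 1 {
        format!("{nss}.0")
    } else {
        nss.to_owned()
    }
}

fn main() {
    let found = Version::parse(&normalize("3.98")).unwrap();
    let required = VersionReq::parse(">=3.98.0").unwrap();
    assert!(required.matches(&found));
}
```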
third_party/rust/neqo-crypto/src/aead.rs (vendored, 8 changes)
@@ -63,13 +63,7 @@ impl RealAead {
/// # Errors
///
/// Returns `Error` when the supporting NSS functions fail.
pub fn new(
_fuzzing: bool,
version: Version,
cipher: Cipher,
secret: &SymKey,
prefix: &str,
) -> Res<Self> {
pub fn new(version: Version, cipher: Cipher, secret: &SymKey, prefix: &str) -> Res<Self> {
let s: *mut PK11SymKey = **secret;
unsafe { Self::from_raw(version, cipher, s, prefix) }
}
@ -4,84 +4,63 @@
|
|||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
#![cfg(feature = "disable-encryption")]
|
||||
|
||||
use std::fmt;
|
||||
|
||||
use crate::{
|
||||
constants::{Cipher, Version},
|
||||
err::{sec::SEC_ERROR_BAD_DATA, Error, Res},
|
||||
p11::SymKey,
|
||||
RealAead,
|
||||
};
|
||||
|
||||
pub const FIXED_TAG_FUZZING: &[u8] = &[0x0a; 16];
|
||||
pub const AEAD_NULL_TAG: &[u8] = &[0x0a; 16];
|
||||
|
||||
pub struct FuzzingAead {
|
||||
real: Option<RealAead>,
|
||||
}
|
||||
pub struct AeadNull {}
|
||||
|
||||
impl FuzzingAead {
|
||||
pub fn new(
|
||||
fuzzing: bool,
|
||||
version: Version,
|
||||
cipher: Cipher,
|
||||
secret: &SymKey,
|
||||
prefix: &str,
|
||||
) -> Res<Self> {
|
||||
let real = if fuzzing {
|
||||
None
|
||||
} else {
|
||||
Some(RealAead::new(false, version, cipher, secret, prefix)?)
|
||||
};
|
||||
Ok(Self { real })
|
||||
impl AeadNull {
|
||||
#[allow(clippy::missing_errors_doc)]
|
||||
pub fn new(_version: Version, _cipher: Cipher, _secret: &SymKey, _prefix: &str) -> Res<Self> {
|
||||
Ok(Self {})
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn expansion(&self) -> usize {
|
||||
if let Some(aead) = &self.real {
|
||||
aead.expansion()
|
||||
} else {
|
||||
FIXED_TAG_FUZZING.len()
|
||||
}
|
||||
AEAD_NULL_TAG.len()
|
||||
}
|
||||
|
||||
#[allow(clippy::missing_errors_doc)]
|
||||
pub fn encrypt<'a>(
|
||||
&self,
|
||||
count: u64,
|
||||
aad: &[u8],
|
||||
_count: u64,
|
||||
_aad: &[u8],
|
||||
input: &[u8],
|
||||
output: &'a mut [u8],
|
||||
) -> Res<&'a [u8]> {
|
||||
if let Some(aead) = &self.real {
|
||||
return aead.encrypt(count, aad, input, output);
|
||||
}
|
||||
|
||||
let l = input.len();
|
||||
output[..l].copy_from_slice(input);
|
||||
output[l..l + 16].copy_from_slice(FIXED_TAG_FUZZING);
|
||||
output[l..l + 16].copy_from_slice(AEAD_NULL_TAG);
|
||||
Ok(&output[..l + 16])
|
||||
}
|
||||
|
||||
#[allow(clippy::missing_errors_doc)]
|
||||
pub fn decrypt<'a>(
|
||||
&self,
|
||||
count: u64,
|
||||
aad: &[u8],
|
||||
_count: u64,
|
||||
_aad: &[u8],
|
||||
input: &[u8],
|
||||
output: &'a mut [u8],
|
||||
) -> Res<&'a [u8]> {
|
||||
if let Some(aead) = &self.real {
|
||||
return aead.decrypt(count, aad, input, output);
|
||||
}
|
||||
|
||||
if input.len() < FIXED_TAG_FUZZING.len() {
|
||||
if input.len() < AEAD_NULL_TAG.len() {
|
||||
return Err(Error::from(SEC_ERROR_BAD_DATA));
|
||||
}
|
||||
|
||||
let len_encrypted = input.len() - FIXED_TAG_FUZZING.len();
|
||||
let len_encrypted = input.len() - AEAD_NULL_TAG.len();
|
||||
// Check that:
|
||||
// 1) expansion is all zeros and
|
||||
// 2) if the encrypted data is also supplied that at least some values are no zero
|
||||
// (otherwise padding will be interpreted as a valid packet)
|
||||
if &input[len_encrypted..] == FIXED_TAG_FUZZING
|
||||
if &input[len_encrypted..] == AEAD_NULL_TAG
|
||||
&& (len_encrypted == 0 || input[..len_encrypted].iter().any(|x| *x != 0x0))
|
||||
{
|
||||
output[..len_encrypted].copy_from_slice(&input[..len_encrypted]);
|
||||
|
|
@ -92,12 +71,8 @@ impl FuzzingAead {
|
|||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for FuzzingAead {
|
||||
impl fmt::Debug for AeadNull {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
if let Some(a) = &self.real {
|
||||
a.fmt(f)
|
||||
} else {
|
||||
write!(f, "[FUZZING AEAD]")
|
||||
}
|
||||
write!(f, "[NULL AEAD]")
|
||||
}
|
||||
}
|
||||
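Taken together, the hunks above swap the fuzzing AEAD for a null AEAD whose encrypt copies the plaintext and appends a fixed 16-byte tag, and whose decrypt verifies the tag, rejects all-zero payloads, and strips it. A minimal standalone sketch of that round trip (free functions with the tag inlined; not the neqo-crypto API):

const AEAD_NULL_TAG: &[u8] = &[0x0a; 16];

fn null_encrypt<'a>(input: &[u8], output: &'a mut [u8]) -> &'a [u8] {
    let l = input.len();
    output[..l].copy_from_slice(input);
    output[l..l + AEAD_NULL_TAG.len()].copy_from_slice(AEAD_NULL_TAG);
    &output[..l + AEAD_NULL_TAG.len()]
}

fn null_decrypt<'a>(input: &[u8], output: &'a mut [u8]) -> Option<&'a [u8]> {
    if input.len() < AEAD_NULL_TAG.len() {
        return None;
    }
    let len = input.len() - AEAD_NULL_TAG.len();
    // Accept only if the tag matches and the "ciphertext" is not all zero bytes,
    // so a run of padding cannot decode as a valid packet.
    if &input[len..] == AEAD_NULL_TAG && (len == 0 || input[..len].iter().any(|x| *x != 0)) {
        output[..len].copy_from_slice(&input[..len]);
        Some(&output[..len])
    } else {
        None
    }
}

fn main() {
    let plain = b"hello";
    let mut enc = [0u8; 64];
    let ciphertext = null_encrypt(plain, &mut enc).to_vec();
    assert_eq!(ciphertext.len(), plain.len() + AEAD_NULL_TAG.len());

    let mut dec = [0u8; 64];
    assert_eq!(null_decrypt(&ciphertext, &mut dec), Some(&plain[..]));
}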
11 third_party/rust/neqo-crypto/src/agent.rs vendored
|
|
@@ -16,7 +16,7 @@ use std::{
|
|||
time::Instant,
|
||||
};
|
||||
|
||||
use neqo_common::{hex_snip_middle, hex_with_len, qdebug, qinfo, qtrace, qwarn};
|
||||
use neqo_common::{hex_snip_middle, hex_with_len, qdebug, qtrace, qwarn};
|
||||
|
||||
pub use crate::{
|
||||
agentio::{as_c_void, Record, RecordList},
|
||||
|
|
@@ -406,10 +406,7 @@ impl SecretAgent {
|
|||
self.set_option(ssl::Opt::Locking, false)?;
|
||||
self.set_option(ssl::Opt::Tickets, false)?;
|
||||
self.set_option(ssl::Opt::OcspStapling, true)?;
|
||||
if let Err(e) = self.set_option(ssl::Opt::Grease, grease) {
|
||||
// Until NSS supports greasing, it's OK to fail here.
|
||||
qinfo!([self], "Failed to enable greasing {:?}", e);
|
||||
}
|
||||
self.set_option(ssl::Opt::Grease, grease)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
|
@@ -670,7 +667,7 @@ impl SecretAgent {
|
|||
let info = self.capture_error(SecretAgentInfo::new(self.fd))?;
|
||||
HandshakeState::Complete(info)
|
||||
};
|
||||
qinfo!([self], "state -> {:?}", self.state);
|
||||
qdebug!([self], "state -> {:?}", self.state);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
|
@@ -898,7 +895,7 @@ impl Client {
|
|||
let len = usize::try_from(len).unwrap();
|
||||
let mut v = Vec::with_capacity(len);
|
||||
v.extend_from_slice(null_safe_slice(token, len));
|
||||
qinfo!(
|
||||
qdebug!(
|
||||
[format!("{fd:p}")],
|
||||
"Got resumption token {}",
|
||||
hex_snip_middle(&v)
|
||||
|
|
|
|||
30 third_party/rust/neqo-crypto/src/err.rs vendored
|
|
@@ -16,13 +16,39 @@ mod codes {
|
|||
#![allow(non_snake_case)]
|
||||
include!(concat!(env!("OUT_DIR"), "/nss_secerr.rs"));
|
||||
include!(concat!(env!("OUT_DIR"), "/nss_sslerr.rs"));
|
||||
include!(concat!(env!("OUT_DIR"), "/mozpkix.rs"));
|
||||
}
|
||||
pub use codes::{mozilla_pkix_ErrorCode as mozpkix, SECErrorCodes as sec, SSLErrorCodes as ssl};
|
||||
pub use codes::{SECErrorCodes as sec, SSLErrorCodes as ssl};
|
||||
pub mod nspr {
|
||||
include!(concat!(env!("OUT_DIR"), "/nspr_err.rs"));
|
||||
}
|
||||
|
||||
pub mod mozpkix {
|
||||
// These are manually extracted from the many bindings generated
|
||||
// by bindgen when provided with the simple header:
|
||||
// #include "mozpkix/pkixnss.h"
|
||||
|
||||
#[allow(non_camel_case_types)]
|
||||
pub type mozilla_pkix_ErrorCode = ::std::os::raw::c_int;
|
||||
pub const MOZILLA_PKIX_ERROR_KEY_PINNING_FAILURE: mozilla_pkix_ErrorCode = -16384;
|
||||
pub const MOZILLA_PKIX_ERROR_CA_CERT_USED_AS_END_ENTITY: mozilla_pkix_ErrorCode = -16383;
|
||||
pub const MOZILLA_PKIX_ERROR_INADEQUATE_KEY_SIZE: mozilla_pkix_ErrorCode = -16382;
|
||||
pub const MOZILLA_PKIX_ERROR_V1_CERT_USED_AS_CA: mozilla_pkix_ErrorCode = -16381;
|
||||
pub const MOZILLA_PKIX_ERROR_NO_RFC822NAME_MATCH: mozilla_pkix_ErrorCode = -16380;
|
||||
pub const MOZILLA_PKIX_ERROR_NOT_YET_VALID_CERTIFICATE: mozilla_pkix_ErrorCode = -16379;
|
||||
pub const MOZILLA_PKIX_ERROR_NOT_YET_VALID_ISSUER_CERTIFICATE: mozilla_pkix_ErrorCode = -16378;
|
||||
pub const MOZILLA_PKIX_ERROR_SIGNATURE_ALGORITHM_MISMATCH: mozilla_pkix_ErrorCode = -16377;
|
||||
pub const MOZILLA_PKIX_ERROR_OCSP_RESPONSE_FOR_CERT_MISSING: mozilla_pkix_ErrorCode = -16376;
|
||||
pub const MOZILLA_PKIX_ERROR_VALIDITY_TOO_LONG: mozilla_pkix_ErrorCode = -16375;
|
||||
pub const MOZILLA_PKIX_ERROR_REQUIRED_TLS_FEATURE_MISSING: mozilla_pkix_ErrorCode = -16374;
|
||||
pub const MOZILLA_PKIX_ERROR_INVALID_INTEGER_ENCODING: mozilla_pkix_ErrorCode = -16373;
|
||||
pub const MOZILLA_PKIX_ERROR_EMPTY_ISSUER_NAME: mozilla_pkix_ErrorCode = -16372;
|
||||
pub const MOZILLA_PKIX_ERROR_ADDITIONAL_POLICY_CONSTRAINT_FAILED: mozilla_pkix_ErrorCode =
|
||||
-16371;
|
||||
pub const MOZILLA_PKIX_ERROR_SELF_SIGNED_CERT: mozilla_pkix_ErrorCode = -16370;
|
||||
pub const MOZILLA_PKIX_ERROR_MITM_DETECTED: mozilla_pkix_ErrorCode = -16369;
|
||||
pub const END_OF_LIST: mozilla_pkix_ErrorCode = -16368;
|
||||
}
|
||||
|
||||
pub type Res<T> = Result<T, Error>;
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, PartialOrd, Ord, Eq)]
|
||||
|
|
|
|||
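The mozpkix codes above are now maintained by hand instead of being generated by bindgen. A hedged sketch of mapping one of these raw codes to a readable name (the constant values are copied from the hunk; the lookup helper is illustrative):

#[allow(non_camel_case_types)]
type mozilla_pkix_ErrorCode = std::os::raw::c_int;

const MOZILLA_PKIX_ERROR_KEY_PINNING_FAILURE: mozilla_pkix_ErrorCode = -16384;
const MOZILLA_PKIX_ERROR_SELF_SIGNED_CERT: mozilla_pkix_ErrorCode = -16370;
const MOZILLA_PKIX_ERROR_MITM_DETECTED: mozilla_pkix_ErrorCode = -16369;

fn error_name(code: mozilla_pkix_ErrorCode) -> &'static str {
    match code {
        MOZILLA_PKIX_ERROR_KEY_PINNING_FAILURE => "KEY_PINNING_FAILURE",
        MOZILLA_PKIX_ERROR_SELF_SIGNED_CERT => "SELF_SIGNED_CERT",
        MOZILLA_PKIX_ERROR_MITM_DETECTED => "MITM_DETECTED",
        _ => "other mozpkix error",
    }
}

fn main() {
    assert_eq!(error_name(-16370), "SELF_SIGNED_CERT");
}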
68 third_party/rust/neqo-crypto/src/lib.rs vendored
|
|
@@ -8,8 +8,8 @@
|
|||
#![allow(clippy::unseparated_literal_suffix, clippy::used_underscore_binding)] // For bindgen code.
|
||||
|
||||
mod aead;
|
||||
#[cfg(feature = "fuzzing")]
|
||||
mod aead_fuzzing;
|
||||
#[cfg(feature = "disable-encryption")]
|
||||
pub mod aead_null;
|
||||
pub mod agent;
|
||||
mod agentio;
|
||||
mod auth;
|
||||
|
|
@@ -33,12 +33,12 @@ mod time;
|
|||
|
||||
use std::{ffi::CString, path::PathBuf, ptr::null, sync::OnceLock};
|
||||
|
||||
#[cfg(not(feature = "fuzzing"))]
|
||||
#[cfg(not(feature = "disable-encryption"))]
|
||||
pub use self::aead::RealAead as Aead;
|
||||
#[cfg(feature = "fuzzing")]
|
||||
#[cfg(feature = "disable-encryption")]
|
||||
pub use self::aead::RealAead;
|
||||
#[cfg(feature = "fuzzing")]
|
||||
pub use self::aead_fuzzing::FuzzingAead as Aead;
|
||||
#[cfg(feature = "disable-encryption")]
|
||||
pub use self::aead_null::AeadNull as Aead;
|
||||
pub use self::{
|
||||
agent::{
|
||||
Agent, AllowZeroRtt, Client, HandshakeState, Record, RecordList, ResumptionToken,
|
||||
|
|
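The re-exports above select the `Aead` type by feature flag: the real AEAD unless `disable-encryption` is enabled, the null AEAD otherwise. A small sketch of the same cfg-gated alias pattern (placeholder types, not the crate's items):

#[allow(dead_code)]
struct RealAead;
#[allow(dead_code)]
struct AeadNull;

// The two cfg attributes are mutually exclusive, so exactly one alias is defined.
#[cfg(not(feature = "disable-encryption"))]
type Aead = RealAead;
#[cfg(feature = "disable-encryption")]
type Aead = AeadNull;

fn main() {
    let _aead: Aead; // `RealAead` in normal builds, `AeadNull` with the feature on
}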
@@ -59,7 +59,8 @@ pub use self::{
|
|||
ssl::Opt,
|
||||
};
|
||||
|
||||
const MINIMUM_NSS_VERSION: &str = "3.97";
|
||||
mod min_version;
|
||||
use min_version::MINIMUM_NSS_VERSION;
|
||||
|
||||
#[allow(non_upper_case_globals, clippy::redundant_static_lifetimes)]
|
||||
#[allow(clippy::upper_case_acronyms)]
|
||||
|
|
@@ -89,7 +90,7 @@ impl Drop for NssLoaded {
|
|||
}
|
||||
}
|
||||
|
||||
static INITIALIZED: OnceLock<NssLoaded> = OnceLock::new();
|
||||
static INITIALIZED: OnceLock<Res<NssLoaded>> = OnceLock::new();
|
||||
|
||||
fn already_initialized() -> bool {
|
||||
unsafe { nss::NSS_IsInitialized() != 0 }
|
||||
|
|
@@ -107,24 +108,24 @@ fn version_check() {
|
|||
/// Initialize NSS. This only executes the initialization routines once, so if there is any chance
|
||||
/// that
|
||||
///
|
||||
/// # Panics
|
||||
/// # Errors
|
||||
///
|
||||
/// When NSS initialization fails.
|
||||
pub fn init() {
|
||||
pub fn init() -> Res<()> {
|
||||
// Set time zero.
|
||||
time::init();
|
||||
_ = INITIALIZED.get_or_init(|| {
|
||||
let res = INITIALIZED.get_or_init(|| {
|
||||
version_check();
|
||||
if already_initialized() {
|
||||
return NssLoaded::External;
|
||||
return Ok(NssLoaded::External);
|
||||
}
|
||||
|
||||
secstatus_to_res(unsafe { nss::NSS_NoDB_Init(null()) }).expect("NSS_NoDB_Init failed");
|
||||
secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() })
|
||||
.expect("NSS_SetDomesticPolicy failed");
|
||||
secstatus_to_res(unsafe { nss::NSS_NoDB_Init(null()) })?;
|
||||
secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() })?;
|
||||
|
||||
NssLoaded::NoDb
|
||||
Ok(NssLoaded::NoDb)
|
||||
});
|
||||
res.as_ref().map(|_| ()).map_err(Clone::clone)
|
||||
}
|
||||
|
||||
/// This enables SSLTRACE by calling a simple, harmless function to trigger its
|
||||
|
|
@@ -132,31 +133,32 @@ pub fn init() {
|
|||
/// global options are accessed. Reading an option is the least impact approach.
|
||||
/// This allows us to use SSLTRACE in all of our unit tests and programs.
|
||||
#[cfg(debug_assertions)]
|
||||
fn enable_ssl_trace() {
|
||||
fn enable_ssl_trace() -> Res<()> {
|
||||
let opt = ssl::Opt::Locking.as_int();
|
||||
let mut v: ::std::os::raw::c_int = 0;
|
||||
secstatus_to_res(unsafe { ssl::SSL_OptionGetDefault(opt, &mut v) })
|
||||
.expect("SSL_OptionGetDefault failed");
|
||||
}
|
||||
|
||||
/// Initialize with a database.
|
||||
///
|
||||
/// # Panics
|
||||
/// # Errors
|
||||
///
|
||||
/// If NSS cannot be initialized.
|
||||
pub fn init_db<P: Into<PathBuf>>(dir: P) {
|
||||
pub fn init_db<P: Into<PathBuf>>(dir: P) -> Res<()> {
|
||||
time::init();
|
||||
_ = INITIALIZED.get_or_init(|| {
|
||||
let res = INITIALIZED.get_or_init(|| {
|
||||
version_check();
|
||||
if already_initialized() {
|
||||
return NssLoaded::External;
|
||||
return Ok(NssLoaded::External);
|
||||
}
|
||||
|
||||
let path = dir.into();
|
||||
assert!(path.is_dir());
|
||||
let pathstr = path.to_str().expect("path converts to string").to_string();
|
||||
let dircstr = CString::new(pathstr).unwrap();
|
||||
let empty = CString::new("").unwrap();
|
||||
if !path.is_dir() {
|
||||
return Err(Error::InternalError);
|
||||
}
|
||||
let pathstr = path.to_str().ok_or(Error::InternalError)?;
|
||||
let dircstr = CString::new(pathstr)?;
|
||||
let empty = CString::new("")?;
|
||||
secstatus_to_res(unsafe {
|
||||
nss::NSS_Initialize(
|
||||
dircstr.as_ptr(),
|
||||
|
|
@@ -165,21 +167,19 @@ pub fn init_db<P: Into<PathBuf>>(dir: P) {
|
|||
nss::SECMOD_DB.as_ptr().cast(),
|
||||
nss::NSS_INIT_READONLY,
|
||||
)
|
||||
})
|
||||
.expect("NSS_Initialize failed");
|
||||
})?;
|
||||
|
||||
secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() })
|
||||
.expect("NSS_SetDomesticPolicy failed");
|
||||
secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() })?;
|
||||
secstatus_to_res(unsafe {
|
||||
ssl::SSL_ConfigServerSessionIDCache(1024, 0, 0, dircstr.as_ptr())
|
||||
})
|
||||
.expect("SSL_ConfigServerSessionIDCache failed");
|
||||
})?;
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
enable_ssl_trace();
|
||||
enable_ssl_trace()?;
|
||||
|
||||
NssLoaded::Db
|
||||
Ok(NssLoaded::Db)
|
||||
});
|
||||
res.as_ref().map(|_| ()).map_err(Clone::clone)
|
||||
}
|
||||
|
||||
/// # Panics
|
||||
|
|
|
|||
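The `init()` rework above stores a `Result` in the `OnceLock`, so a failing first initialization is cached and reported to every later caller instead of panicking. A minimal sketch of that memoized fallible-init pattern (`Loaded` and the `String` error are placeholders for `NssLoaded` and the crate's error type):

use std::sync::OnceLock;

#[derive(Debug)]
struct Loaded; // stand-in for NssLoaded

static INITIALIZED: OnceLock<Result<Loaded, String>> = OnceLock::new();

fn init() -> Result<(), String> {
    // The closure runs at most once; every later call sees the cached Ok or Err.
    let res = INITIALIZED.get_or_init(|| {
        // fallible setup would go here
        Ok(Loaded)
    });
    res.as_ref().map(|_| ()).map_err(Clone::clone)
}

fn main() {
    assert!(init().is_ok());
    assert!(init().is_ok()); // second call reuses the stored result
}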
9 third_party/rust/neqo-crypto/src/min_version.rs vendored Normal file

@@ -0,0 +1,9 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

/// The minimum version of NSS that is required by this version of neqo.
/// Note that the string may contain whitespace at the beginning and/or end.
pub(crate) const MINIMUM_NSS_VERSION: &str = include_str!("../min_version.txt");
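Since `MINIMUM_NSS_VERSION` is now included from a text file that may carry whitespace, a caller has to trim before comparing. A hedged sketch of such a check (the version literal reuses the previously inlined "3.97" as a stand-in, and the comparison helper is illustrative, not the crate's `version_check`):

// Stand-in for the included file's contents; the new file's value is not shown in this diff.
const MINIMUM_NSS_VERSION: &str = "3.97\n";

fn parse(v: &str) -> Option<(u32, u32)> {
    let mut parts = v.trim().split('.');
    Some((parts.next()?.parse().ok()?, parts.next()?.parse().ok()?))
}

fn main() {
    // Trimming matters because the included text may carry leading/trailing whitespace.
    let min = parse(MINIMUM_NSS_VERSION).expect("well-formed minimum version");
    let runtime = parse("3.99.1").expect("well-formed runtime version");
    assert!(runtime >= min, "NSS is too old");
}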
|
@@ -47,7 +47,7 @@ impl SelfEncrypt {
|
|||
debug_assert_eq!(salt.len(), Self::SALT_LENGTH);
|
||||
let salt = hkdf::import_key(self.version, salt)?;
|
||||
let secret = hkdf::extract(self.version, self.cipher, Some(&salt), k)?;
|
||||
Aead::new(false, self.version, self.cipher, &secret, "neqo self")
|
||||
Aead::new(self.version, self.cipher, &secret, "neqo self")
|
||||
}
|
||||
|
||||
/// Rotate keys. This causes any previous key that is being held to be replaced by the current
|
||||
|
|
|
|||
3 third_party/rust/neqo-crypto/tests/aead.rs vendored
|
|
@@ -5,7 +5,7 @@
|
|||
// except according to those terms.
|
||||
|
||||
#![warn(clippy::pedantic)]
|
||||
#![cfg(not(feature = "fuzzing"))]
|
||||
#![cfg(not(feature = "disable-encryption"))]
|
||||
|
||||
use neqo_crypto::{
|
||||
constants::{Cipher, TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3},
|
||||
|
|
@@ -40,7 +40,6 @@ fn make_aead(cipher: Cipher) -> Aead {
|
|||
)
|
||||
.expect("make a secret");
|
||||
Aead::new(
|
||||
false,
|
||||
TLS_VERSION_1_3,
|
||||
cipher,
|
||||
&secret,
|
||||
|
|
|
|||
51 third_party/rust/neqo-crypto/tests/init.rs vendored
|
|
@@ -15,13 +15,7 @@ use neqo_crypto::{assert_initialized, init_db};
|
|||
|
||||
// Pull in the NSS internals so that we can ask NSS if it thinks that
|
||||
// it is properly initialized.
|
||||
#[allow(
|
||||
dead_code,
|
||||
non_upper_case_globals,
|
||||
clippy::redundant_static_lifetimes,
|
||||
clippy::unseparated_literal_suffix,
|
||||
clippy::upper_case_acronyms
|
||||
)]
|
||||
#[allow(dead_code, non_upper_case_globals)]
|
||||
mod nss {
|
||||
include!(concat!(env!("OUT_DIR"), "/nss_init.rs"));
|
||||
}
|
||||
|
|
@@ -29,19 +23,54 @@ mod nss {
|
|||
#[cfg(nss_nodb)]
|
||||
#[test]
|
||||
fn init_nodb() {
|
||||
init();
|
||||
neqo_crypto::init().unwrap();
|
||||
assert_initialized();
|
||||
unsafe {
|
||||
assert!(nss::NSS_IsInitialized() != 0);
|
||||
assert_ne!(nss::NSS_IsInitialized(), 0);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(nss_nodb)]
|
||||
#[test]
|
||||
fn init_twice_nodb() {
|
||||
unsafe {
|
||||
nss::NSS_NoDB_Init(std::ptr::null());
|
||||
assert_ne!(nss::NSS_IsInitialized(), 0);
|
||||
}
|
||||
// Now do it again
|
||||
init_nodb();
|
||||
}
|
||||
|
||||
#[cfg(not(nss_nodb))]
|
||||
#[test]
|
||||
fn init_withdb() {
|
||||
init_db(::test_fixture::NSS_DB_PATH);
|
||||
init_db(::test_fixture::NSS_DB_PATH).unwrap();
|
||||
assert_initialized();
|
||||
unsafe {
|
||||
assert!(nss::NSS_IsInitialized() != 0);
|
||||
assert_ne!(nss::NSS_IsInitialized(), 0);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(nss_nodb))]
|
||||
#[test]
|
||||
fn init_twice_withdb() {
|
||||
use std::{ffi::CString, path::PathBuf};
|
||||
|
||||
let empty = CString::new("").unwrap();
|
||||
let path: PathBuf = ::test_fixture::NSS_DB_PATH.into();
|
||||
assert!(path.is_dir());
|
||||
let pathstr = path.to_str().unwrap();
|
||||
let dircstr = CString::new(pathstr).unwrap();
|
||||
unsafe {
|
||||
nss::NSS_Initialize(
|
||||
dircstr.as_ptr(),
|
||||
empty.as_ptr(),
|
||||
empty.as_ptr(),
|
||||
nss::SECMOD_DB.as_ptr().cast(),
|
||||
nss::NSS_INIT_READONLY,
|
||||
);
|
||||
assert_ne!(nss::NSS_IsInitialized(), 0);
|
||||
}
|
||||
// Now do it again
|
||||
init_withdb();
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -4,7 +4,7 @@
|
|||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
#![cfg(not(feature = "fuzzing"))]
|
||||
#![cfg(not(feature = "disable-encryption"))]
|
||||
|
||||
use neqo_crypto::{
|
||||
constants::{TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3},
|
||||
|
|
@@ -15,7 +15,7 @@ use neqo_crypto::{
|
|||
|
||||
#[test]
|
||||
fn se_create() {
|
||||
init();
|
||||
init().unwrap();
|
||||
SelfEncrypt::new(TLS_VERSION_1_3, TLS_AES_128_GCM_SHA256).expect("constructor works");
|
||||
}
|
||||
|
||||
|
|
@@ -23,7 +23,7 @@ const PLAINTEXT: &[u8] = b"PLAINTEXT";
|
|||
const AAD: &[u8] = b"AAD";
|
||||
|
||||
fn sealed() -> (SelfEncrypt, Vec<u8>) {
|
||||
init();
|
||||
init().unwrap();
|
||||
let se = SelfEncrypt::new(TLS_VERSION_1_3, TLS_AES_128_GCM_SHA256).unwrap();
|
||||
let sealed = se.seal(AAD, PLAINTEXT).expect("sealing works");
|
||||
(se, sealed)
|
||||
|
|
|
|||
|
|
@ -1 +1 @@
|
|||
{"files":{"Cargo.toml":"458f04261cda071d61402c52cf64062ad7cfc24f3f312bfaa5d52cae47409010","src/buffered_send_stream.rs":"f45bdf9ad2a04b3828c74ff5440681d3c9d1af39b55470e4f729842dc2412295","src/client_events.rs":"77fedca72ce54956eaba3fb7103085d196a631b764662584ea2629224c5c234e","src/conn_params.rs":"224a8ea6ef632930a7788a1cabf47ce69ad41bd4bc8dcf3053fbd998fdb38e82","src/connection.rs":"9384cdfd8481a30a0cd13f56f590188ccfa47b4472f35f7a4978537bab19adc1","src/connection_client.rs":"8db29409f3a265f7dff7c7a7eaf2ac607d6923e4b3238e82eab6dc22854e4303","src/connection_server.rs":"ca33b50650bd1ca2a952851b72712d55ec2e48b48f1f06e4184c808b8e1e009a","src/control_stream_local.rs":"ae52e3286f1686ca1265e7de841392addd42616db02799bb967a59feb6039cb5","src/control_stream_remote.rs":"59eb4041e366d92f9f294e8446755caa5e91fd943bba7b79b726698ba13be248","src/features/extended_connect/mod.rs":"3b02f6b18627f3855465a81b1d9b285e6f13839e75a8a6db648ed9082908d7f0","src/features/extended_connect/tests/mod.rs":"fd6aee37243713e80fc526552f21f0222338cec9890409b6575a2a637b17ec1f","src/features/extended_connect/tests/webtransport/datagrams.rs":"4c85a90afb753ce588e3fdeb773669bc49c013aebc28912340359eb01b74fd70","src/features/extended_connect/tests/webtransport/mod.rs":"a30ea715f5271a826a739278b18e145964dedbce7026eed45f1b7d0355c407d5","src/features/extended_connect/tests/webtransport/negotiation.rs":"98254ef8446581ec520026b04ef9549645602181b61602c9936f6660141edf0b","src/features/extended_connect/tests/webtransport/sessions.rs":"de3d836f666c2bec31e70b33bdc2669572cabbe17df2225db7282613a224a364","src/features/extended_connect/tests/webtransport/streams.rs":"8b3c34cac1b2171252a4bb53d420ac2098549a20309c327bf56e2e9ba9e33538","src/features/extended_connect/webtransport_session.rs":"239d92c06fbc5f6226078bb411a803f57b555dea0077349d49d7f57671cf2eab","src/features/extended_connect/webtransport_streams.rs":"5d7507aaf6a819d266fbea9b7a415c8324329df0f6936d9045b73e17a5b844ee","src/features/mod.rs":"925aae4427ad82e4d019354802b223d53db5e5585d4a940f5417a24a9503d7ee","src/frames/hframe.rs":"56c36ac597504f28c73cf2370acd82104f8c7a7b9ffc0f6d222378abc524482d","src/frames/mod.rs":"7d0a46ca147336d14781edb8dbee8b03c2e4bcd6646f5473a9d93d31fe73fecb","src/frames/reader.rs":"e07ee9de74bc499c10afcda592fefd9a7eef3381c045aa14f6596d67313546ca","src/frames/tests/hframe.rs":"01ec74eb3eb25d95042aa0263f9267f89535e6b7b8c1161fab4ba9ee5352d4a7","src/frames/tests/mod.rs":"0610609b316767a6a022837d32ee0452e37ea296fde37e51bec87e7c77e923a3","src/frames/tests/reader.rs":"2bfadc7afbc41bff9f5f930b31550259a8a92484d35f6c5d8dd8fd9acfb88f5b","src/frames/tests/wtframe.rs":"589ebe1e62ce4da63b37b7d22cde7ba572ddbf29336fdcdbbcd0a745f79dacd8","src/frames/wtframe.rs":"1d9d0256ace2ba7262343ed035df795f21a4d45065792d3fd45b3391b6916b2f","src/headers_checks.rs":"be0f0109298dcc3a40350b7c0950076ddfe20617d195b305e3ffc8582557ab18","src/lib.rs":"4f908a021222bcc79b9d569bc3759a493379a20b47dfa228fddf51600bf6e446","src/priority.rs":"f3b77c208962e44a4e2d13138c6998b703d40e7bcf8f73ea84d8ef5b556e0aee","src/push_controller.rs":"13bccf2834ae19109504cf695a5948c3b2d03fd101bc032a92bb77a033423854","src/qlog.rs":"2debd75c7ea103c95ff79e44412f1408c3e496e324976100c55d5a833912b6c3","src/qpack_decoder_receiver.rs":"c927dfc3e58c71d282210ba79280f6f03e789733bc3bedc247e68bab516b9e9e","src/qpack_encoder_receiver.rs":"d0ac03cc111b6e1c555a8654d3234116f2b135b5b040edac23cefe2d640beba9","src/recv_message.rs":"eb711dbc6b3371373c26b75333ac5858edf0d30184b0e05d67ab02c656eb6619","src/request_target.rs":"6041a69a0a74969ec08bc164509c055e9bad99
f53bbeb16c0aa17d108dd68b8c","src/send_message.rs":"7785af11b77cee398faf3f7a2875b41e251ed7a1b272c23f81a48334596ab836","src/server.rs":"b9e6060da36cfb467478f5b78b17e22a123214ad2d64c919ce688ea2bc0e24bb","src/server_connection_events.rs":"12d353ca6301467f6d475dde3b789951a5716c89ddd7dbf1383efef8082361f3","src/server_events.rs":"463dd2cb6f97a800bac32c93c4aa2a6289f71e33a89f3b33152460cb941fc378","src/settings.rs":"476b154b5eea4c8d69a4a790fee3e527cef4d375df1cfb5eed04ec56406fe15a","src/stream_type_reader.rs":"7a7226b7911d69f7e00ec4987c2a32a5e8a33463203398cbee1e6645d2691478","tests/httpconn.rs":"bb6927801a8c75e4f05eb6cdb1e7f2d57be69b74e68ddad2a1614f2aeed04369","tests/priority.rs":"364754507873298612ad12e8d1d106d26d993712142d0be4cbf056da5338854c","tests/send_message.rs":"b5435045b16429d9e626ea94a8f10e2937e1a5a878af0035763a4f5ec09bf53c","tests/webtransport.rs":"25794305017ff58e57dc3c3b9b078e5bfc1814ea82a521b7b7156228e613c092"},"package":null}
|
||||
{"files":{"Cargo.toml":"14fc8adbe57d4828725c060fdcb432e4bd7e7041f95267bc62fb6f5eaee34a82","src/buffered_send_stream.rs":"f45bdf9ad2a04b3828c74ff5440681d3c9d1af39b55470e4f729842dc2412295","src/client_events.rs":"77fedca72ce54956eaba3fb7103085d196a631b764662584ea2629224c5c234e","src/conn_params.rs":"224a8ea6ef632930a7788a1cabf47ce69ad41bd4bc8dcf3053fbd998fdb38e82","src/connection.rs":"57e4660838c0d16d8f97e92c010b80ffd6c92de86c2f162f04ab2d31e724c02d","src/connection_client.rs":"796eba806f44d4a689cdeb009d7abfa3b76ba56bc66c551ce02a50465f0d59c5","src/connection_server.rs":"d8de13ca23ccaf4a485d552cdae09454a3ee6577b8e5fac0931e909f79461625","src/control_stream_local.rs":"ae52e3286f1686ca1265e7de841392addd42616db02799bb967a59feb6039cb5","src/control_stream_remote.rs":"59eb4041e366d92f9f294e8446755caa5e91fd943bba7b79b726698ba13be248","src/features/extended_connect/mod.rs":"3b02f6b18627f3855465a81b1d9b285e6f13839e75a8a6db648ed9082908d7f0","src/features/extended_connect/tests/mod.rs":"fd6aee37243713e80fc526552f21f0222338cec9890409b6575a2a637b17ec1f","src/features/extended_connect/tests/webtransport/datagrams.rs":"4c85a90afb753ce588e3fdeb773669bc49c013aebc28912340359eb01b74fd70","src/features/extended_connect/tests/webtransport/mod.rs":"a30ea715f5271a826a739278b18e145964dedbce7026eed45f1b7d0355c407d5","src/features/extended_connect/tests/webtransport/negotiation.rs":"98254ef8446581ec520026b04ef9549645602181b61602c9936f6660141edf0b","src/features/extended_connect/tests/webtransport/sessions.rs":"de3d836f666c2bec31e70b33bdc2669572cabbe17df2225db7282613a224a364","src/features/extended_connect/tests/webtransport/streams.rs":"8b3c34cac1b2171252a4bb53d420ac2098549a20309c327bf56e2e9ba9e33538","src/features/extended_connect/webtransport_session.rs":"239d92c06fbc5f6226078bb411a803f57b555dea0077349d49d7f57671cf2eab","src/features/extended_connect/webtransport_streams.rs":"5d7507aaf6a819d266fbea9b7a415c8324329df0f6936d9045b73e17a5b844ee","src/features/mod.rs":"925aae4427ad82e4d019354802b223d53db5e5585d4a940f5417a24a9503d7ee","src/frames/hframe.rs":"56c36ac597504f28c73cf2370acd82104f8c7a7b9ffc0f6d222378abc524482d","src/frames/mod.rs":"7d0a46ca147336d14781edb8dbee8b03c2e4bcd6646f5473a9d93d31fe73fecb","src/frames/reader.rs":"e07ee9de74bc499c10afcda592fefd9a7eef3381c045aa14f6596d67313546ca","src/frames/tests/hframe.rs":"01ec74eb3eb25d95042aa0263f9267f89535e6b7b8c1161fab4ba9ee5352d4a7","src/frames/tests/mod.rs":"0610609b316767a6a022837d32ee0452e37ea296fde37e51bec87e7c77e923a3","src/frames/tests/reader.rs":"2bfadc7afbc41bff9f5f930b31550259a8a92484d35f6c5d8dd8fd9acfb88f5b","src/frames/tests/wtframe.rs":"589ebe1e62ce4da63b37b7d22cde7ba572ddbf29336fdcdbbcd0a745f79dacd8","src/frames/wtframe.rs":"1d9d0256ace2ba7262343ed035df795f21a4d45065792d3fd45b3391b6916b2f","src/headers_checks.rs":"be0f0109298dcc3a40350b7c0950076ddfe20617d195b305e3ffc8582557ab18","src/lib.rs":"4f908a021222bcc79b9d569bc3759a493379a20b47dfa228fddf51600bf6e446","src/priority.rs":"f3b77c208962e44a4e2d13138c6998b703d40e7bcf8f73ea84d8ef5b556e0aee","src/push_controller.rs":"13bccf2834ae19109504cf695a5948c3b2d03fd101bc032a92bb77a033423854","src/qlog.rs":"2debd75c7ea103c95ff79e44412f1408c3e496e324976100c55d5a833912b6c3","src/qpack_decoder_receiver.rs":"c927dfc3e58c71d282210ba79280f6f03e789733bc3bedc247e68bab516b9e9e","src/qpack_encoder_receiver.rs":"d0ac03cc111b6e1c555a8654d3234116f2b135b5b040edac23cefe2d640beba9","src/recv_message.rs":"7ac8d4057ba53874e4edfc62cd25ad5d3f0b10aaac5bf6e156103c3bc44e18cc","src/request_target.rs":"6041a69a0a74969ec08bc164509c055e9bad99
f53bbeb16c0aa17d108dd68b8c","src/send_message.rs":"374e168f60063b8102a2aff52c719ae2e1e5078527cf50d095b3e7217f6ec7d2","src/server.rs":"b9e6060da36cfb467478f5b78b17e22a123214ad2d64c919ce688ea2bc0e24bb","src/server_connection_events.rs":"12d353ca6301467f6d475dde3b789951a5716c89ddd7dbf1383efef8082361f3","src/server_events.rs":"1cda8d6c413fad0fa67fcfd7cb78e795bf7ef7f0e09b5720992646a82d51ce16","src/settings.rs":"476b154b5eea4c8d69a4a790fee3e527cef4d375df1cfb5eed04ec56406fe15a","src/stream_type_reader.rs":"7a7226b7911d69f7e00ec4987c2a32a5e8a33463203398cbee1e6645d2691478","tests/httpconn.rs":"bb6927801a8c75e4f05eb6cdb1e7f2d57be69b74e68ddad2a1614f2aeed04369","tests/priority.rs":"364754507873298612ad12e8d1d106d26d993712142d0be4cbf056da5338854c","tests/send_message.rs":"b5435045b16429d9e626ea94a8f10e2937e1a5a878af0035763a4f5ec09bf53c","tests/webtransport.rs":"25794305017ff58e57dc3c3b9b078e5bfc1814ea82a521b7b7156228e613c092"},"package":null}
|
||||
8 third_party/rust/neqo-http3/Cargo.toml vendored
|
|
@ -13,7 +13,7 @@
|
|||
edition = "2021"
|
||||
rust-version = "1.74.0"
|
||||
name = "neqo-http3"
|
||||
version = "0.7.2"
|
||||
version = "0.7.3"
|
||||
authors = ["The Neqo Authors <necko@mozilla.com>"]
|
||||
homepage = "https://github.com/mozilla/neqo/"
|
||||
license = "MIT OR Apache-2.0"
|
||||
|
|
@ -62,9 +62,9 @@ default-features = false
|
|||
path = "../test-fixture"
|
||||
|
||||
[features]
|
||||
fuzzing = [
|
||||
"neqo-transport/fuzzing",
|
||||
"neqo-crypto/fuzzing",
|
||||
disable-encryption = [
|
||||
"neqo-transport/disable-encryption",
|
||||
"neqo-crypto/disable-encryption",
|
||||
]
|
||||
|
||||
[lints.clippy.pedantic]
|
||||
|
|
|
|||
16 third_party/rust/neqo-http3/src/connection.rs vendored
|
|
@ -354,7 +354,7 @@ impl Http3Connection {
|
|||
/// This function creates and initializes, i.e. send stream type, the control and qpack
|
||||
/// streams.
|
||||
fn initialize_http3_connection(&mut self, conn: &mut Connection) -> Res<()> {
|
||||
qinfo!([self], "Initialize the http3 connection.");
|
||||
qdebug!([self], "Initialize the http3 connection.");
|
||||
self.control_stream_local.create(conn)?;
|
||||
|
||||
self.send_settings();
|
||||
|
|
@ -704,7 +704,7 @@ impl Http3Connection {
|
|||
);
|
||||
}
|
||||
NewStreamType::Decoder => {
|
||||
qinfo!([self], "A new remote qpack encoder stream {}", stream_id);
|
||||
qdebug!([self], "A new remote qpack encoder stream {}", stream_id);
|
||||
self.check_stream_exists(Http3StreamType::Decoder)?;
|
||||
self.recv_streams.insert(
|
||||
stream_id,
|
||||
|
|
@ -715,7 +715,7 @@ impl Http3Connection {
|
|||
);
|
||||
}
|
||||
NewStreamType::Encoder => {
|
||||
qinfo!([self], "A new remote qpack decoder stream {}", stream_id);
|
||||
qdebug!([self], "A new remote qpack decoder stream {}", stream_id);
|
||||
self.check_stream_exists(Http3StreamType::Encoder)?;
|
||||
self.recv_streams.insert(
|
||||
stream_id,
|
||||
|
|
@ -766,7 +766,7 @@ impl Http3Connection {
|
|||
|
||||
/// This is called when an application closes the connection.
|
||||
pub fn close(&mut self, error: AppError) {
|
||||
qinfo!([self], "Close connection error {:?}.", error);
|
||||
qdebug!([self], "Close connection error {:?}.", error);
|
||||
self.state = Http3State::Closing(ConnectionError::Application(error));
|
||||
if (!self.send_streams.is_empty() || !self.recv_streams.is_empty()) && (error == 0) {
|
||||
qwarn!("close(0) called when streams still active");
|
||||
|
|
@ -952,7 +952,7 @@ impl Http3Connection {
|
|||
stream_id: StreamId,
|
||||
buf: &mut [u8],
|
||||
) -> Res<(usize, bool)> {
|
||||
qinfo!([self], "read_data from stream {}.", stream_id);
|
||||
qdebug!([self], "read_data from stream {}.", stream_id);
|
||||
let res = self
|
||||
.recv_streams
|
||||
.get_mut(&stream_id)
|
||||
|
|
@ -1091,7 +1091,7 @@ impl Http3Connection {
|
|||
|
||||
/// This is called when an application wants to close the sending side of a stream.
|
||||
pub fn stream_close_send(&mut self, conn: &mut Connection, stream_id: StreamId) -> Res<()> {
|
||||
qinfo!([self], "Close the sending side for stream {}.", stream_id);
|
||||
qdebug!([self], "Close the sending side for stream {}.", stream_id);
|
||||
debug_assert!(self.state.active());
|
||||
let send_stream = self
|
||||
.send_streams
|
||||
|
|
@ -1402,7 +1402,7 @@ impl Http3Connection {
|
|||
/// `PriorityUpdateRequestPush` which handling is specific to the client and server, we must
|
||||
/// give them to the specific client/server handler.
|
||||
fn handle_control_frame(&mut self, f: HFrame) -> Res<Option<HFrame>> {
|
||||
qinfo!([self], "Handle a control frame {:?}", f);
|
||||
qdebug!([self], "Handle a control frame {:?}", f);
|
||||
if !matches!(f, HFrame::Settings { .. })
|
||||
&& !matches!(
|
||||
self.settings_state,
|
||||
|
|
@ -1433,7 +1433,7 @@ impl Http3Connection {
|
|||
}
|
||||
|
||||
fn handle_settings(&mut self, new_settings: HSettings) -> Res<()> {
|
||||
qinfo!([self], "Handle SETTINGS frame.");
|
||||
qdebug!([self], "Handle SETTINGS frame.");
|
||||
match &self.settings_state {
|
||||
Http3RemoteSettingsState::NotReceived => {
|
||||
self.set_qpack_settings(&new_settings)?;
|
||||
|
|
|
|||
|
|
@ -590,7 +590,7 @@ impl Http3Client {
|
|||
///
|
||||
/// An error will be return if stream does not exist.
|
||||
pub fn stream_close_send(&mut self, stream_id: StreamId) -> Res<()> {
|
||||
qinfo!([self], "Close sending side stream={}.", stream_id);
|
||||
qdebug!([self], "Close sending side stream={}.", stream_id);
|
||||
self.base_handler
|
||||
.stream_close_send(&mut self.conn, stream_id)
|
||||
}
|
||||
|
|
@ -652,7 +652,7 @@ impl Http3Client {
|
|||
stream_id: StreamId,
|
||||
buf: &mut [u8],
|
||||
) -> Res<(usize, bool)> {
|
||||
qinfo!([self], "read_data from stream {}.", stream_id);
|
||||
qdebug!([self], "read_data from stream {}.", stream_id);
|
||||
let res = self.base_handler.read_data(&mut self.conn, stream_id, buf);
|
||||
if let Err(e) = &res {
|
||||
if e.connection_error() {
|
||||
|
|
|
|||
|
|
@ -98,7 +98,7 @@ impl Http3ServerHandler {
|
|||
///
|
||||
/// An error will be returned if stream does not exist.
|
||||
pub fn stream_close_send(&mut self, stream_id: StreamId, conn: &mut Connection) -> Res<()> {
|
||||
qinfo!([self], "Close sending side stream={}.", stream_id);
|
||||
qdebug!([self], "Close sending side stream={}.", stream_id);
|
||||
self.base_handler.stream_close_send(conn, stream_id)?;
|
||||
self.base_handler.stream_has_pending_data(stream_id);
|
||||
self.needs_processing = true;
|
||||
|
|
@ -408,7 +408,7 @@ impl Http3ServerHandler {
|
|||
stream_id: StreamId,
|
||||
buf: &mut [u8],
|
||||
) -> Res<(usize, bool)> {
|
||||
qinfo!([self], "read_data from stream {}.", stream_id);
|
||||
qdebug!([self], "read_data from stream {}.", stream_id);
|
||||
let res = self.base_handler.read_data(conn, stream_id, buf);
|
||||
if let Err(e) = &res {
|
||||
if e.connection_error() {
|
||||
|
|
|
|||
|
|
@ -271,7 +271,7 @@ impl RecvMessage {
|
|||
}
|
||||
(None, false) => break Ok(()),
|
||||
(Some(frame), fin) => {
|
||||
qinfo!(
|
||||
qdebug!(
|
||||
[self],
|
||||
"A new frame has been received: {:?}; state={:?} fin={}",
|
||||
frame,
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@
|
|||
|
||||
use std::{cell::RefCell, cmp::min, fmt::Debug, rc::Rc};
|
||||
|
||||
use neqo_common::{qdebug, qinfo, qtrace, Encoder, Header, MessageType};
|
||||
use neqo_common::{qdebug, qtrace, Encoder, Header, MessageType};
|
||||
use neqo_qpack::encoder::QPackEncoder;
|
||||
use neqo_transport::{Connection, StreamId};
|
||||
|
||||
|
|
@ -119,7 +119,7 @@ impl SendMessage {
|
|||
encoder: Rc<RefCell<QPackEncoder>>,
|
||||
conn_events: Box<dyn SendStreamEvents>,
|
||||
) -> Self {
|
||||
qinfo!("Create a request stream_id={}", stream_id);
|
||||
qdebug!("Create a request stream_id={}", stream_id);
|
||||
Self {
|
||||
state: MessageState::WaitingForHeaders,
|
||||
message_type,
|
||||
|
|
@ -193,7 +193,7 @@ impl SendStream for SendMessage {
|
|||
min(buf.len(), available - 9)
|
||||
};
|
||||
|
||||
qinfo!(
|
||||
qdebug!(
|
||||
[self],
|
||||
"send_request_body: available={} to_send={}.",
|
||||
available,
|
||||
|
|
|
|||
|
|
@ -13,7 +13,7 @@ use std::{
|
|||
rc::Rc,
|
||||
};
|
||||
|
||||
use neqo_common::{qdebug, qinfo, Encoder, Header};
|
||||
use neqo_common::{qdebug, Encoder, Header};
|
||||
use neqo_transport::{
|
||||
server::ActiveConnectionRef, AppError, Connection, DatagramTracking, StreamId, StreamType,
|
||||
};
|
||||
|
|
@ -189,7 +189,7 @@ impl Http3OrWebTransportStream {
|
|||
///
|
||||
/// It may return `InvalidStreamId` if a stream does not exist anymore.
|
||||
pub fn send_data(&mut self, data: &[u8]) -> Res<usize> {
|
||||
qinfo!([self], "Set new response.");
|
||||
qdebug!([self], "Set new response.");
|
||||
self.stream_handler.send_data(data)
|
||||
}
|
||||
|
||||
|
|
@ -199,7 +199,7 @@ impl Http3OrWebTransportStream {
|
|||
///
|
||||
/// It may return `InvalidStreamId` if a stream does not exist anymore.
|
||||
pub fn stream_close_send(&mut self) -> Res<()> {
|
||||
qinfo!([self], "Set new response.");
|
||||
qdebug!([self], "Set new response.");
|
||||
self.stream_handler.stream_close_send()
|
||||
}
|
||||
}
|
||||
|
|
@ -270,7 +270,7 @@ impl WebTransportRequest {
|
|||
///
|
||||
/// It may return `InvalidStreamId` if a stream does not exist anymore.
|
||||
pub fn response(&mut self, accept: &WebTransportSessionAcceptAction) -> Res<()> {
|
||||
qinfo!([self], "Set a response for a WebTransport session.");
|
||||
qdebug!([self], "Set a response for a WebTransport session.");
|
||||
self.stream_handler
|
||||
.handler
|
||||
.borrow_mut()
|
||||
|
|
|
|||
|
|
@ -1 +1 @@
|
|||
{"files":{"Cargo.toml":"c2152600379c3961ba79e661e164630a63531744f79e082fce39cdf1cbe75ddd","src/decoder.rs":"0675444129e074e9d5d56f0d45d2eaed614c85e22cfe9f2d28cdee912c15b420","src/decoder_instructions.rs":"d991d70e51f079bc5b30d3982fd0176edfa9bb7ba14c17a20ec3eea878c56206","src/encoder.rs":"84649cbee81e050f55d7ea691ac871e072741abd8bbf96303eb2e98aa8ee0aea","src/encoder_instructions.rs":"86e3abbd9cf94332041326ac6cf806ed64623e3fd38dbc0385b1f63c37e73fd9","src/header_block.rs":"3925476df69b90d950594faadc5cb24c374d46de8c75a374a235f0d27323a7d8","src/huffman.rs":"71ec740426eee0abb6205104e504f5b97f525a76c4a5f5827b78034d28ce1876","src/huffman_decode_helper.rs":"9ce470e318b3664f58aa109bed483ab15bfd9e0b17d261ea2b609668a42a9d80","src/huffman_table.rs":"06fea766a6276ac56c7ee0326faed800a742c15fda1f33bf2513e6cc6a5e6d27","src/lib.rs":"fd673630b5ed64197851c9a9758685096d3c0aa04f4994290733a38057004ee6","src/prefix.rs":"fb4a9acbcf6fd3178f4474404cd3d3b131abca934f69fe14a9d744bc7e636dc5","src/qlog.rs":"e320007ea8309546b26f9c0019ab8722da80dbd38fa976233fd8ae19a0af637c","src/qpack_send_buf.rs":"755af90fe077b1bcca34a1a2a1bdce5ce601ea490b2ca3f1313e0107d13e67e2","src/reader.rs":"1581261741a0922b147a6975cc8b1a3503846f6dbfdb771d254760c298996982","src/static_table.rs":"fda9d5c6f38f94b0bf92d3afdf8432dce6e27e189736596e16727090c77b78ec","src/stats.rs":"624dfa3b40858c304097bb0ce5b1be1bb4d7916b1abfc222f1aa705907009730","src/table.rs":"6e16debdceadc453546f247f8316883af9eeeedd12f2070219d8484a0a131d46"},"package":null}
|
||||
{"files":{"Cargo.toml":"3e347de7641de127199b93fcb75f225073ff80fac83feef6067d2c1d6b5d54fe","src/decoder.rs":"0675444129e074e9d5d56f0d45d2eaed614c85e22cfe9f2d28cdee912c15b420","src/decoder_instructions.rs":"d991d70e51f079bc5b30d3982fd0176edfa9bb7ba14c17a20ec3eea878c56206","src/encoder.rs":"84649cbee81e050f55d7ea691ac871e072741abd8bbf96303eb2e98aa8ee0aea","src/encoder_instructions.rs":"86e3abbd9cf94332041326ac6cf806ed64623e3fd38dbc0385b1f63c37e73fd9","src/header_block.rs":"3925476df69b90d950594faadc5cb24c374d46de8c75a374a235f0d27323a7d8","src/huffman.rs":"71ec740426eee0abb6205104e504f5b97f525a76c4a5f5827b78034d28ce1876","src/huffman_decode_helper.rs":"9ce470e318b3664f58aa109bed483ab15bfd9e0b17d261ea2b609668a42a9d80","src/huffman_table.rs":"06fea766a6276ac56c7ee0326faed800a742c15fda1f33bf2513e6cc6a5e6d27","src/lib.rs":"fd673630b5ed64197851c9a9758685096d3c0aa04f4994290733a38057004ee6","src/prefix.rs":"fb4a9acbcf6fd3178f4474404cd3d3b131abca934f69fe14a9d744bc7e636dc5","src/qlog.rs":"e320007ea8309546b26f9c0019ab8722da80dbd38fa976233fd8ae19a0af637c","src/qpack_send_buf.rs":"755af90fe077b1bcca34a1a2a1bdce5ce601ea490b2ca3f1313e0107d13e67e2","src/reader.rs":"1581261741a0922b147a6975cc8b1a3503846f6dbfdb771d254760c298996982","src/static_table.rs":"fda9d5c6f38f94b0bf92d3afdf8432dce6e27e189736596e16727090c77b78ec","src/stats.rs":"624dfa3b40858c304097bb0ce5b1be1bb4d7916b1abfc222f1aa705907009730","src/table.rs":"6e16debdceadc453546f247f8316883af9eeeedd12f2070219d8484a0a131d46"},"package":null}
|
||||
2 third_party/rust/neqo-qpack/Cargo.toml vendored
|
|
@ -13,7 +13,7 @@
|
|||
edition = "2021"
|
||||
rust-version = "1.74.0"
|
||||
name = "neqo-qpack"
|
||||
version = "0.7.2"
|
||||
version = "0.7.3"
|
||||
authors = ["The Neqo Authors <necko@mozilla.com>"]
|
||||
homepage = "https://github.com/mozilla/neqo/"
|
||||
license = "MIT OR Apache-2.0"
|
||||
|
|
|
|||
File diff suppressed because one or more lines are too long
4 third_party/rust/neqo-transport/Cargo.toml vendored
|
|
@ -13,7 +13,7 @@
|
|||
edition = "2021"
|
||||
rust-version = "1.74.0"
|
||||
name = "neqo-transport"
|
||||
version = "0.7.2"
|
||||
version = "0.7.3"
|
||||
authors = ["The Neqo Authors <necko@mozilla.com>"]
|
||||
homepage = "https://github.com/mozilla/neqo/"
|
||||
license = "MIT OR Apache-2.0"
|
||||
|
|
@ -73,7 +73,7 @@ path = "../test-fixture"
|
|||
|
||||
[features]
|
||||
bench = []
|
||||
fuzzing = ["neqo-crypto/fuzzing"]
|
||||
disable-encryption = ["neqo-crypto/disable-encryption"]
|
||||
|
||||
[lints.clippy.pedantic]
|
||||
level = "warn"
|
||||
|
|
|
|||
|
|
@@ -11,30 +11,32 @@ const CHUNK: u64 = 1000;
|
|||
const END: u64 = 100_000;
|
||||
fn build_coalesce(len: u64) -> RangeTracker {
|
||||
let mut used = RangeTracker::default();
|
||||
used.mark_acked(0, CHUNK as usize);
|
||||
used.mark_sent(CHUNK, END as usize);
|
||||
let chunk = usize::try_from(CHUNK).expect("should fit");
|
||||
used.mark_acked(0, chunk);
|
||||
used.mark_sent(CHUNK, usize::try_from(END).expect("should fit"));
|
||||
// leave a gap or it will coalesce here
|
||||
for i in 2..=len {
|
||||
// These do not get immediately coalesced when marking since they're not at the end or start
|
||||
used.mark_acked(i * CHUNK, CHUNK as usize);
|
||||
used.mark_acked(i * CHUNK, chunk);
|
||||
}
|
||||
used
|
||||
}
|
||||
|
||||
fn coalesce(c: &mut Criterion, count: u64) {
|
||||
let chunk = usize::try_from(CHUNK).expect("should fit");
|
||||
c.bench_function(
|
||||
&format!("coalesce_acked_from_zero {count}+1 entries"),
|
||||
|b| {
|
||||
b.iter_batched_ref(
|
||||
|| build_coalesce(count),
|
||||
|used| {
|
||||
used.mark_acked(CHUNK, CHUNK as usize);
|
||||
used.mark_acked(CHUNK, chunk);
|
||||
let tail = (count + 1) * CHUNK;
|
||||
used.mark_sent(tail, CHUNK as usize);
|
||||
used.mark_acked(tail, CHUNK as usize);
|
||||
used.mark_sent(tail, chunk);
|
||||
used.mark_acked(tail, chunk);
|
||||
},
|
||||
criterion::BatchSize::SmallInput,
|
||||
)
|
||||
);
|
||||
},
|
||||
);
|
||||
}
|
||||
|
|
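The benchmark hunk above replaces `CHUNK as usize` casts with checked conversions. A tiny standalone illustration of why `usize::try_from` is preferred over `as` for narrowing (not part of the benchmark itself):

fn main() {
    const CHUNK: u64 = 1000;
    // Explicit, checked narrowing: panics with a message if it ever cannot fit,
    // instead of silently truncating the way `as` would.
    let chunk = usize::try_from(CHUNK).expect("should fit");
    assert_eq!(chunk, 1000);

    // A value that genuinely does not fit is reported as an error, not wrapped.
    assert!(usize::try_from(u128::MAX).is_err());
}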
|
|||
|
|
@ -11,14 +11,14 @@ fn rx_stream_orderer() {
|
|||
let mut rx = RxStreamOrderer::new();
|
||||
let data: &[u8] = &[0; 1337];
|
||||
|
||||
for i in 0..100000 {
|
||||
for i in 0..100_000 {
|
||||
rx.inbound_frame(i * 1337, data);
|
||||
}
|
||||
}
|
||||
|
||||
fn criterion_benchmark(c: &mut Criterion) {
|
||||
c.bench_function("RxStreamOrderer::inbound_frame()", |b| {
|
||||
b.iter(rx_stream_orderer)
|
||||
b.iter(rx_stream_orderer);
|
||||
});
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@@ -6,7 +6,7 @@
|
|||
|
||||
use std::time::Duration;
|
||||
|
||||
use criterion::{criterion_group, criterion_main, BatchSize::SmallInput, Criterion};
|
||||
use criterion::{criterion_group, criterion_main, BatchSize::SmallInput, Criterion, Throughput};
|
||||
use test_fixture::{
|
||||
boxed,
|
||||
sim::{
|
||||
|
|
@ -20,8 +20,11 @@ const ZERO: Duration = Duration::from_millis(0);
|
|||
const JITTER: Duration = Duration::from_millis(10);
|
||||
const TRANSFER_AMOUNT: usize = 1 << 22; // 4Mbyte
|
||||
|
||||
fn benchmark_transfer(c: &mut Criterion, label: &str, seed: Option<impl AsRef<str>>) {
|
||||
c.bench_function(label, |b| {
|
||||
fn benchmark_transfer(c: &mut Criterion, label: &str, seed: &Option<impl AsRef<str>>) {
|
||||
let mut group = c.benchmark_group("transfer");
|
||||
group.throughput(Throughput::Bytes(u64::try_from(TRANSFER_AMOUNT).unwrap()));
|
||||
group.noise_threshold(0.03);
|
||||
group.bench_function(label, |b| {
|
||||
b.iter_batched(
|
||||
|| {
|
||||
let nodes = boxed![
|
||||
|
|
@ -42,15 +45,16 @@ fn benchmark_transfer(c: &mut Criterion, label: &str, seed: Option<impl AsRef<st
|
|||
sim.run();
|
||||
},
|
||||
SmallInput,
|
||||
)
|
||||
);
|
||||
});
|
||||
group.finish();
|
||||
}
|
||||
|
||||
fn benchmark_transfer_variable(c: &mut Criterion) {
|
||||
benchmark_transfer(
|
||||
c,
|
||||
"Run multiple transfers with varying seeds",
|
||||
std::env::var("SIMULATION_SEED").ok(),
|
||||
&std::env::var("SIMULATION_SEED").ok(),
|
||||
);
|
||||
}
|
||||
|
||||
|
|
@ -58,7 +62,7 @@ fn benchmark_transfer_fixed(c: &mut Criterion) {
|
|||
benchmark_transfer(
|
||||
c,
|
||||
"Run multiple transfers with the same seed",
|
||||
Some("62df6933ba1f543cece01db8f27fb2025529b27f93df39e19f006e1db3b8c843"),
|
||||
&Some("62df6933ba1f543cece01db8f27fb2025529b27f93df39e19f006e1db3b8c843"),
|
||||
);
|
||||
}
|
||||
|
||||
|
|
|
|||
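The transfer benchmark above now reports throughput for the 4 MiB payload and sets a 3% noise threshold on the group. A minimal Criterion sketch using the same two knobs; the workload here is a stand-in for the simulator, and the file would live under benches/ with harness = false:

use criterion::{criterion_group, criterion_main, Criterion, Throughput};

const TRANSFER_AMOUNT: usize = 1 << 22; // 4 MiB

fn transfer(c: &mut Criterion) {
    let mut group = c.benchmark_group("transfer");
    // Report bytes/second instead of raw time per iteration.
    group.throughput(Throughput::Bytes(u64::try_from(TRANSFER_AMOUNT).unwrap()));
    // Ignore run-to-run changes smaller than 3% when comparing results.
    group.noise_threshold(0.03);
    group.bench_function("copy 4 MiB", |b| {
        let src = vec![0u8; TRANSFER_AMOUNT];
        b.iter(|| src.clone());
    });
    group.finish();
}

criterion_group!(benches, transfer);
criterion_main!(benches);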
|
|
@ -164,7 +164,7 @@ impl<T: WindowAdjustment> CongestionControl for ClassicCongestionControl<T> {
|
|||
let mut is_app_limited = true;
|
||||
let mut new_acked = 0;
|
||||
for pkt in acked_pkts {
|
||||
qinfo!(
|
||||
qdebug!(
|
||||
"packet_acked this={:p}, pn={}, ps={}, ignored={}, lost={}, rtt_est={:?}",
|
||||
self,
|
||||
pkt.pn,
|
||||
|
|
@ -198,7 +198,7 @@ impl<T: WindowAdjustment> CongestionControl for ClassicCongestionControl<T> {
|
|||
|
||||
if is_app_limited {
|
||||
self.cc_algorithm.on_app_limited();
|
||||
qinfo!("on_packets_acked this={:p}, limited=1, bytes_in_flight={}, cwnd={}, state={:?}, new_acked={}", self, self.bytes_in_flight, self.congestion_window, self.state, new_acked);
|
||||
qdebug!("on_packets_acked this={:p}, limited=1, bytes_in_flight={}, cwnd={}, state={:?}, new_acked={}", self, self.bytes_in_flight, self.congestion_window, self.state, new_acked);
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -208,7 +208,7 @@ impl<T: WindowAdjustment> CongestionControl for ClassicCongestionControl<T> {
|
|||
let increase = min(self.ssthresh - self.congestion_window, self.acked_bytes);
|
||||
self.congestion_window += increase;
|
||||
self.acked_bytes -= increase;
|
||||
qinfo!([self], "slow start += {}", increase);
|
||||
qdebug!([self], "slow start += {}", increase);
|
||||
if self.congestion_window == self.ssthresh {
|
||||
// This doesn't look like it is necessary, but it can happen
|
||||
// after persistent congestion.
|
||||
|
|
@ -249,7 +249,7 @@ impl<T: WindowAdjustment> CongestionControl for ClassicCongestionControl<T> {
|
|||
QlogMetric::BytesInFlight(self.bytes_in_flight),
|
||||
],
|
||||
);
|
||||
qinfo!([self], "on_packets_acked this={:p}, limited=0, bytes_in_flight={}, cwnd={}, state={:?}, new_acked={}", self, self.bytes_in_flight, self.congestion_window, self.state, new_acked);
|
||||
qdebug!([self], "on_packets_acked this={:p}, limited=0, bytes_in_flight={}, cwnd={}, state={:?}, new_acked={}", self, self.bytes_in_flight, self.congestion_window, self.state, new_acked);
|
||||
}
|
||||
|
||||
/// Update congestion controller state based on lost packets.
|
||||
|
|
@ -265,7 +265,7 @@ impl<T: WindowAdjustment> CongestionControl for ClassicCongestionControl<T> {
|
|||
}
|
||||
|
||||
for pkt in lost_packets.iter().filter(|pkt| pkt.cc_in_flight()) {
|
||||
qinfo!(
|
||||
qdebug!(
|
||||
"packet_lost this={:p}, pn={}, ps={}",
|
||||
self,
|
||||
pkt.pn,
|
||||
|
|
@ -286,7 +286,7 @@ impl<T: WindowAdjustment> CongestionControl for ClassicCongestionControl<T> {
|
|||
pto,
|
||||
lost_packets,
|
||||
);
|
||||
qinfo!(
|
||||
qdebug!(
|
||||
"on_packets_lost this={:p}, bytes_in_flight={}, cwnd={}, state={:?}",
|
||||
self,
|
||||
self.bytes_in_flight,
|
||||
|
|
@ -335,7 +335,7 @@ impl<T: WindowAdjustment> CongestionControl for ClassicCongestionControl<T> {
|
|||
}
|
||||
|
||||
self.bytes_in_flight += pkt.size;
|
||||
qinfo!(
|
||||
qdebug!(
|
||||
"packet_sent this={:p}, pn={}, ps={}",
|
||||
self,
|
||||
pkt.pn,
|
||||
|
|
@ -498,7 +498,7 @@ impl<T: WindowAdjustment> ClassicCongestionControl<T> {
|
|||
self.congestion_window = max(cwnd, CWND_MIN);
|
||||
self.acked_bytes = acked_bytes;
|
||||
self.ssthresh = self.congestion_window;
|
||||
qinfo!(
|
||||
qdebug!(
|
||||
[self],
|
||||
"Cong event -> recovery; cwnd {}, ssthresh {}",
|
||||
self.congestion_window,
|
||||
|
|
|
|||
|
|
@ -9,7 +9,7 @@
|
|||
|
||||
use std::fmt::Write;
|
||||
|
||||
use neqo_common::{qdebug, Decoder};
|
||||
use neqo_common::{qdebug, Decoder, IpTos};
|
||||
|
||||
use crate::{
|
||||
connection::Connection,
|
||||
|
|
@ -26,6 +26,7 @@ pub fn dump_packet(
|
|||
pt: PacketType,
|
||||
pn: PacketNumber,
|
||||
payload: &[u8],
|
||||
tos: IpTos,
|
||||
) {
|
||||
if log::STATIC_MAX_LEVEL == log::LevelFilter::Off || !log::log_enabled!(log::Level::Debug) {
|
||||
return;
|
||||
|
|
@ -38,9 +39,18 @@ pub fn dump_packet(
|
|||
s.push_str(" [broken]...");
|
||||
break;
|
||||
};
|
||||
if let Some(x) = f.dump() {
|
||||
let x = f.dump();
|
||||
if !x.is_empty() {
|
||||
write!(&mut s, "\n {} {}", dir, &x).unwrap();
|
||||
}
|
||||
}
|
||||
qdebug!([conn], "pn={} type={:?} {}{}", pn, pt, path.borrow(), s);
|
||||
qdebug!(
|
||||
[conn],
|
||||
"pn={} type={:?} {} {:?}{}",
|
||||
pn,
|
||||
pt,
|
||||
path.borrow(),
|
||||
tos,
|
||||
s
|
||||
);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ use std::{
|
|||
|
||||
use neqo_common::{
|
||||
event::Provider as EventProvider, hex, hex_snip_middle, hrtime, qdebug, qerror, qinfo,
|
||||
qlog::NeqoQlog, qtrace, qwarn, Datagram, Decoder, Encoder, Role,
|
||||
qlog::NeqoQlog, qtrace, qwarn, Datagram, Decoder, Encoder, IpTos, Role,
|
||||
};
|
||||
use neqo_crypto::{
|
||||
agent::CertificateInfo, Agent, AntiReplay, AuthenticationStatus, Cipher, Client, Group,
|
||||
|
|
@ -383,7 +383,6 @@ impl Connection {
|
|||
agent,
|
||||
protocols.iter().map(P::as_ref).map(String::from).collect(),
|
||||
Rc::clone(&tphandler),
|
||||
conn_params.is_fuzzing(),
|
||||
)?;
|
||||
|
||||
let stats = StatsCell::default();
|
||||
|
|
@ -778,7 +777,7 @@ impl Connection {
|
|||
});
|
||||
enc.encode(extra);
|
||||
let records = s.send_ticket(now, enc.as_ref())?;
|
||||
qinfo!([self], "send session ticket {}", hex(&enc));
|
||||
qdebug!([self], "send session ticket {}", hex(&enc));
|
||||
self.crypto.buffer_records(records)?;
|
||||
} else {
|
||||
unreachable!();
|
||||
|
|
@ -824,7 +823,7 @@ impl Connection {
|
|||
/// the connection to fail. However, if no packets have been
|
||||
/// exchanged, it's not OK.
|
||||
pub fn authenticated(&mut self, status: AuthenticationStatus, now: Instant) {
|
||||
qinfo!([self], "Authenticated {:?}", status);
|
||||
qdebug!([self], "Authenticated {:?}", status);
|
||||
self.crypto.tls.authenticated(status);
|
||||
let res = self.handshake(now, self.version, PacketNumberSpace::Handshake, None);
|
||||
self.absorb_error(now, res);
|
||||
|
|
@ -1154,7 +1153,7 @@ impl Connection {
|
|||
|
||||
fn discard_keys(&mut self, space: PacketNumberSpace, now: Instant) {
|
||||
if self.crypto.discard(space) {
|
||||
qinfo!([self], "Drop packet number space {}", space);
|
||||
qdebug!([self], "Drop packet number space {}", space);
|
||||
let primary = self.paths.primary();
|
||||
self.loss_recovery.discard(&primary, space, now);
|
||||
self.acks.drop_space(space);
|
||||
|
|
@ -1492,6 +1491,7 @@ impl Connection {
|
|||
payload.packet_type(),
|
||||
payload.pn(),
|
||||
&payload[..],
|
||||
d.tos(),
|
||||
);
|
||||
|
||||
qlog::packet_received(&mut self.qlog, &packet, &payload);
|
||||
|
|
@ -1552,6 +1552,10 @@ impl Connection {
|
|||
packet: &DecryptedPacket,
|
||||
now: Instant,
|
||||
) -> Res<bool> {
|
||||
(!packet.is_empty())
|
||||
.then_some(())
|
||||
.ok_or(Error::ProtocolViolation)?;
|
||||
|
||||
// TODO(ekr@rtfm.com): Have the server blow away the initial
|
||||
// crypto state if this fails? Otherwise, we will get a panic
|
||||
// on the assert for doesn't exist.
|
||||
|
|
@ -1560,24 +1564,8 @@ impl Connection {
|
|||
let mut ack_eliciting = false;
|
||||
let mut probing = true;
|
||||
let mut d = Decoder::from(&packet[..]);
|
||||
let mut consecutive_padding = 0;
|
||||
while d.remaining() > 0 {
|
||||
let mut f = Frame::decode(&mut d)?;
|
||||
|
||||
// Skip padding
|
||||
while f == Frame::Padding && d.remaining() > 0 {
|
||||
consecutive_padding += 1;
|
||||
f = Frame::decode(&mut d)?;
|
||||
}
|
||||
if consecutive_padding > 0 {
|
||||
qdebug!(
|
||||
[self],
|
||||
"PADDING frame repeated {} times",
|
||||
consecutive_padding
|
||||
);
|
||||
consecutive_padding = 0;
|
||||
}
|
||||
|
||||
let f = Frame::decode(&mut d)?;
|
||||
ack_eliciting |= f.ack_eliciting();
|
||||
probing &= f.path_probing();
|
||||
let t = f.get_type();
|
||||
|
|
@ -2271,6 +2259,7 @@ impl Connection {
|
|||
pt,
|
||||
pn,
|
||||
&builder.as_ref()[payload_start..],
|
||||
IpTos::default(), // TODO: set from path
|
||||
);
|
||||
qlog::packet_sent(
|
||||
&mut self.qlog,
|
||||
|
|
@ -2323,7 +2312,7 @@ impl Connection {
|
|||
}
|
||||
|
||||
if encoder.is_empty() {
|
||||
qinfo!("TX blocked, profile={:?} ", profile);
|
||||
qdebug!("TX blocked, profile={:?} ", profile);
|
||||
Ok(SendOption::No(profile.paced()))
|
||||
} else {
|
||||
// Perform additional padding for Initial packets as necessary.
|
||||
|
|
@ -2367,7 +2356,7 @@ impl Connection {
|
|||
}
|
||||
|
||||
fn client_start(&mut self, now: Instant) -> Res<()> {
|
||||
qinfo!([self], "client_start");
|
||||
qdebug!([self], "client_start");
|
||||
debug_assert_eq!(self.role, Role::Client);
|
||||
qlog::client_connection_started(&mut self.qlog, &self.paths.primary());
|
||||
qlog::client_version_information_initiated(&mut self.qlog, self.conn_params.get_versions());
|
||||
|
|
@ -2599,7 +2588,7 @@ impl Connection {
|
|||
|
||||
fn confirm_version(&mut self, v: Version) {
|
||||
if self.version != v {
|
||||
qinfo!([self], "Compatible upgrade {:?} ==> {:?}", self.version, v);
|
||||
qdebug!([self], "Compatible upgrade {:?} ==> {:?}", self.version, v);
|
||||
}
|
||||
self.crypto.confirm_version(v);
|
||||
self.version = v;
|
||||
|
|
@@ -2694,9 +2683,8 @@ impl Connection {
|
|||
.input_frame(&frame, &mut self.stats.borrow_mut().frame_rx);
|
||||
}
|
||||
match frame {
|
||||
Frame::Padding => {
|
||||
// Note: This counts contiguous padding as a single frame.
|
||||
self.stats.borrow_mut().frame_rx.padding += 1;
|
||||
Frame::Padding(length) => {
|
||||
self.stats.borrow_mut().frame_rx.padding += usize::from(length);
|
||||
}
|
||||
Frame::Ping => {
|
||||
// If we get a PING and there are outstanding CRYPTO frames,
|
||||
|
|
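With the hunks above, runs of PADDING are decoded as a single `Frame::Padding(length)` and the receive stats add that length instead of counting one frame per call. A toy sketch of coalescing a padding run into one counted frame (simplified types, not Neqo's `Frame` or decoder):

#[derive(Debug, PartialEq)]
enum Frame {
    Padding(u16),
    Ping,
}

// PADDING is frame type 0x00 and PING is 0x01; a run of zero bytes is collapsed
// into a single Padding frame carrying the run length.
fn decode(input: &[u8]) -> Vec<Frame> {
    let mut frames = Vec::new();
    let mut i = 0;
    while i < input.len() {
        match input[i] {
            0x00 => {
                let start = i;
                while i < input.len() && input[i] == 0x00 {
                    i += 1;
                }
                frames.push(Frame::Padding(u16::try_from(i - start).expect("short run")));
            }
            0x01 => {
                frames.push(Frame::Ping);
                i += 1;
            }
            _ => break, // other frame types omitted in this sketch
        }
    }
    frames
}

fn main() {
    let frames = decode(&[0x00, 0x00, 0x00, 0x01]);
    assert_eq!(frames, vec![Frame::Padding(3), Frame::Ping]);
    // The stats counter then adds the whole run at once:
    let padding: usize = frames
        .iter()
        .map(|f| match f {
            Frame::Padding(n) => usize::from(*n),
            Frame::Ping => 0,
        })
        .sum();
    assert_eq!(padding, 3);
}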
@ -2899,7 +2887,7 @@ impl Connection {
|
|||
R: IntoIterator<Item = RangeInclusive<u64>> + Debug,
|
||||
R::IntoIter: ExactSizeIterator,
|
||||
{
|
||||
qinfo!([self], "Rx ACK space={}, ranges={:?}", space, ack_ranges);
|
||||
qdebug!([self], "Rx ACK space={}, ranges={:?}", space, ack_ranges);
|
||||
|
||||
let (acked_packets, lost_packets) = self.loss_recovery.on_ack_received(
|
||||
&self.paths.primary(),
|
||||
|
|
@ -2953,7 +2941,7 @@ impl Connection {
|
|||
}
|
||||
|
||||
fn set_connected(&mut self, now: Instant) -> Res<()> {
|
||||
qinfo!([self], "TLS connection complete");
|
||||
qdebug!([self], "TLS connection complete");
|
||||
if self.crypto.tls.info().map(SecretAgentInfo::alpn).is_none() {
|
||||
qwarn!([self], "No ALPN. Closing connection.");
|
||||
// 120 = no_application_protocol
|
||||
|
|
@ -2996,7 +2984,7 @@ impl Connection {
|
|||
|
||||
fn set_state(&mut self, state: State) {
|
||||
if state > self.state {
|
||||
qinfo!([self], "State change from {:?} -> {:?}", self.state, state);
|
||||
qdebug!([self], "State change from {:?} -> {:?}", self.state, state);
|
||||
self.state = state.clone();
|
||||
if self.state.closed() {
|
||||
self.streams.clear_streams();
|
||||
|
|
|
|||
|
|
@ -77,7 +77,6 @@ pub struct ConnectionParameters {
|
|||
outgoing_datagram_queue: usize,
|
||||
incoming_datagram_queue: usize,
|
||||
fast_pto: u8,
|
||||
fuzzing: bool,
|
||||
grease: bool,
|
||||
pacing: bool,
|
||||
}
|
||||
|
|
@ -100,7 +99,6 @@ impl Default for ConnectionParameters {
|
|||
outgoing_datagram_queue: MAX_QUEUED_DATAGRAMS_DEFAULT,
|
||||
incoming_datagram_queue: MAX_QUEUED_DATAGRAMS_DEFAULT,
|
||||
fast_pto: FAST_PTO_SCALE,
|
||||
fuzzing: false,
|
||||
grease: true,
|
||||
pacing: true,
|
||||
}
|
||||
|
|
@ -324,17 +322,6 @@ impl ConnectionParameters {
|
|||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn is_fuzzing(&self) -> bool {
|
||||
self.fuzzing
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn fuzzing(mut self, enable: bool) -> Self {
|
||||
self.fuzzing = enable;
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn is_greasing(&self) -> bool {
|
||||
self.grease
|
||||
|
|
|
|||
|
|
@ -209,7 +209,10 @@ pub enum StateSignaling {
|
|||
impl StateSignaling {
|
||||
pub fn handshake_done(&mut self) {
|
||||
if !matches!(self, Self::Idle) {
|
||||
debug_assert!(false, "StateSignaling must be in Idle state.");
|
||||
debug_assert!(
|
||||
false,
|
||||
"StateSignaling must be in Idle state but is in {self:?} state.",
|
||||
);
|
||||
return;
|
||||
}
|
||||
*self = Self::HandshakeDone;
|
||||
|
|
|
|||
|
|
@ -16,9 +16,10 @@ use neqo_common::{event::Provider, qdebug, Datagram};
|
|||
use neqo_crypto::{
|
||||
constants::TLS_CHACHA20_POLY1305_SHA256, generate_ech_keys, AuthenticationStatus,
|
||||
};
|
||||
#[cfg(not(feature = "disable-encryption"))]
|
||||
use test_fixture::datagram;
|
||||
use test_fixture::{
|
||||
assertions, assertions::assert_coalesced_0rtt, datagram, fixture_init, now, split_datagram,
|
||||
DEFAULT_ADDR,
|
||||
assertions, assertions::assert_coalesced_0rtt, fixture_init, now, split_datagram, DEFAULT_ADDR,
|
||||
};
|
||||
|
||||
use super::{
|
||||
|
|
@ -458,7 +459,7 @@ fn coalesce_05rtt() {
|
|||
assert_eq!(client.stats().dropped_rx, 0); // No Initial padding.
|
||||
assert_eq!(client.stats().packets_rx, 4);
|
||||
assert_eq!(client.stats().saved_datagrams, 1);
|
||||
assert_eq!(client.stats().frame_rx.padding, 1); // Padding uses frames.
|
||||
assert!(client.stats().frame_rx.padding > 0); // Padding uses frames.
|
||||
|
||||
// Allow the handshake to complete.
|
||||
now += RTT / 2;
|
||||
|
|
@ -605,7 +606,7 @@ fn reorder_1rtt() {
|
|||
}
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "fuzzing"))]
|
||||
#[cfg(not(feature = "disable-encryption"))]
|
||||
#[test]
|
||||
fn corrupted_initial() {
|
||||
let mut client = default_client();
|
||||
|
|
@ -808,7 +809,7 @@ fn anti_amplification() {
|
|||
assert_eq!(*server.state(), State::Confirmed);
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "fuzzing"))]
|
||||
#[cfg(not(feature = "disable-encryption"))]
|
||||
#[test]
|
||||
fn garbage_initial() {
|
||||
let mut client = default_client();
|
||||
|
|
|
|||
|
|
@ -37,11 +37,11 @@ mod ackrate;
|
|||
mod cc;
|
||||
mod close;
|
||||
mod datagram;
|
||||
mod fuzzing;
|
||||
mod handshake;
|
||||
mod idle;
|
||||
mod keys;
|
||||
mod migration;
|
||||
mod null;
|
||||
mod priority;
|
||||
mod recovery;
|
||||
mod resumption;
|
||||
|
|
@@ -170,12 +170,17 @@ impl crate::connection::test_internal::FrameWriter for PingWriter {
|
|||
}
|
||||
}
|
||||
|
||||
trait DatagramModifier: FnMut(Datagram) -> Option<Datagram> {}
|
||||
|
||||
impl<T> DatagramModifier for T where T: FnMut(Datagram) -> Option<Datagram> {}
|
||||
|
||||
/// Drive the handshake between the client and server.
|
||||
fn handshake(
|
||||
fn handshake_with_modifier(
|
||||
client: &mut Connection,
|
||||
server: &mut Connection,
|
||||
now: Instant,
|
||||
rtt: Duration,
|
||||
mut modifier: impl DatagramModifier,
|
||||
) -> Instant {
|
||||
let mut a = client;
|
||||
let mut b = server;
|
||||
|
|
@ -212,7 +217,11 @@ fn handshake(
|
|||
did_ping[a.role()] = true;
|
||||
}
|
||||
assert!(had_input || output.is_some());
|
||||
input = output;
|
||||
if let Some(d) = output {
|
||||
input = modifier(d);
|
||||
} else {
|
||||
input = output;
|
||||
}
|
||||
qtrace!("handshake: t += {:?}", rtt / 2);
|
||||
now += rtt / 2;
|
||||
mem::swap(&mut a, &mut b);
|
||||
|
|
@@ -223,6 +232,15 @@ fn handshake(
|
|||
now
|
||||
}
|
||||
|
||||
fn handshake(
|
||||
client: &mut Connection,
|
||||
server: &mut Connection,
|
||||
now: Instant,
|
||||
rtt: Duration,
|
||||
) -> Instant {
|
||||
handshake_with_modifier(client, server, now, rtt, Some)
|
||||
}
|
||||
|
||||
fn connect_fail(
|
||||
client: &mut Connection,
|
||||
server: &mut Connection,
|
||||
|
|
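The test changes above introduce `DatagramModifier` as a closure "trait alias" with a blanket impl, and pass `Some` wherever no modification is wanted. A minimal sketch of that pattern outside the test fixture (a `u32` stands in for `Datagram`):

// Any closure that maps a value to `Option` of the same type qualifies,
// and `Some` is the identity modifier.
trait Modifier: FnMut(u32) -> Option<u32> {}
impl<T> Modifier for T where T: FnMut(u32) -> Option<u32> {}

fn apply(mut modify: impl Modifier, value: u32) -> Option<u32> {
    modify(value)
}

fn main() {
    assert_eq!(apply(Some, 7), Some(7)); // pass through unchanged
    assert_eq!(apply(|_| None, 7), None); // drop the value entirely
    assert_eq!(apply(|v| Some(v + 1), 7), Some(8)); // rewrite it
}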
@ -234,11 +252,12 @@ fn connect_fail(
|
|||
assert_error(server, &ConnectionError::Transport(server_error));
|
||||
}
|
||||
|
||||
fn connect_with_rtt(
|
||||
fn connect_with_rtt_and_modifier(
|
||||
client: &mut Connection,
|
||||
server: &mut Connection,
|
||||
now: Instant,
|
||||
rtt: Duration,
|
||||
modifier: impl DatagramModifier,
|
||||
) -> Instant {
|
||||
fn check_rtt(stats: &Stats, rtt: Duration) {
|
||||
assert_eq!(stats.rtt, rtt);
|
||||
|
|
@ -246,7 +265,7 @@ fn connect_with_rtt(
let n = stats.frame_rx.ack + usize::from(stats.rtt_init_guess);
assert_eq!(stats.rttvar, rttvar_after_n_updates(n, rtt));
}
let now = handshake(client, server, now, rtt);
let now = handshake_with_modifier(client, server, now, rtt, modifier);
assert_eq!(*client.state(), State::Confirmed);
assert_eq!(*server.state(), State::Confirmed);

@ -255,6 +274,15 @@ fn connect_with_rtt(
now
}

fn connect_with_rtt(
client: &mut Connection,
server: &mut Connection,
now: Instant,
rtt: Duration,
) -> Instant {
connect_with_rtt_and_modifier(client, server, now, rtt, Some)
}

fn connect(client: &mut Connection, server: &mut Connection) {
connect_with_rtt(client, server, now(), Duration::new(0, 0));
}

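For illustration only, a minimal sketch (not part of this patch) of how the new `*_with_modifier` helpers can be driven. `default_client`, `default_server`, `now` and `Duration` are the fixture items already used in this file; the closure only has to satisfy the `DatagramModifier` bound, i.e. `FnMut(Datagram) -> Option<Datagram>`, and passing `Some` keeps the old behaviour.

// Sketch: count every datagram exchanged during the handshake without
// altering it. Returning Some(d) forwards the datagram; None would drop it.
fn handshake_counting_datagrams() {
    let mut client = default_client();
    let mut server = default_server();
    let mut seen = 0usize;
    let _done_at = handshake_with_modifier(
        &mut client,
        &mut server,
        now(),
        Duration::from_millis(50),
        |d| {
            seen += 1;
            Some(d)
        },
    );
    assert!(seen > 0);
}
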
@ -301,8 +329,13 @@ fn assert_idle(client: &mut Connection, server: &mut Connection, rtt: Duration,
}

/// Connect with an RTT and then force both peers to be idle.
fn connect_rtt_idle(client: &mut Connection, server: &mut Connection, rtt: Duration) -> Instant {
let now = connect_with_rtt(client, server, now(), rtt);
fn connect_rtt_idle_with_modifier(
client: &mut Connection,
server: &mut Connection,
rtt: Duration,
modifier: impl DatagramModifier,
) -> Instant {
let now = connect_with_rtt_and_modifier(client, server, now(), rtt, modifier);
assert_idle(client, server, rtt, now);
// Drain events from both as well.
_ = client.events().count();

@ -311,8 +344,20 @@ fn connect_rtt_idle(client: &mut Connection, server: &mut Connection, rtt: Durat
now
}

fn connect_rtt_idle(client: &mut Connection, server: &mut Connection, rtt: Duration) -> Instant {
connect_rtt_idle_with_modifier(client, server, rtt, Some)
}

fn connect_force_idle_with_modifier(
client: &mut Connection,
server: &mut Connection,
modifier: impl DatagramModifier,
) {
connect_rtt_idle_with_modifier(client, server, Duration::new(0, 0), modifier);
}

fn connect_force_idle(client: &mut Connection, server: &mut Connection) {
connect_rtt_idle(client, server, Duration::new(0, 0));
connect_force_idle_with_modifier(client, server, Some);
}

fn fill_stream(c: &mut Connection, stream: StreamId) {

@ -524,12 +569,14 @@ fn assert_full_cwnd(packets: &[Datagram], cwnd: usize) {
}

/// Send something on a stream from `sender` to `receiver`, maybe allowing for pacing.
/// Takes a modifier function that can be used to modify the datagram before it is sent.
/// Return the resulting datagram and the new time.
#[must_use]
fn send_something_paced(
fn send_something_paced_with_modifier(
sender: &mut Connection,
mut now: Instant,
allow_pacing: bool,
mut modifier: impl DatagramModifier,
) -> (Datagram, Instant) {
let stream_id = sender.stream_create(StreamType::UniDi).unwrap();
assert!(sender.stream_send(stream_id, DEFAULT_STREAM_DATA).is_ok());

@ -544,16 +591,32 @@ fn send_something_paced(
.dgram()
.expect("send_something: should have something to send")
}
Output::Datagram(d) => d,
Output::Datagram(d) => modifier(d).unwrap(),
Output::None => panic!("send_something: got Output::None"),
};
(dgram, now)
}

fn send_something_paced(
sender: &mut Connection,
now: Instant,
allow_pacing: bool,
) -> (Datagram, Instant) {
send_something_paced_with_modifier(sender, now, allow_pacing, Some)
}

fn send_something_with_modifier(
sender: &mut Connection,
now: Instant,
modifier: impl DatagramModifier,
) -> Datagram {
send_something_paced_with_modifier(sender, now, false, modifier).0
}

/// Send something on a stream from `sender` to `receiver`.
/// Return the resulting datagram.
fn send_something(sender: &mut Connection, now: Instant) -> Datagram {
send_something_paced(sender, now, false).0
send_something_with_modifier(sender, now, Some)
}

/// Send something on a stream from `sender` to `receiver`.

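Similarly hedged, a sketch of `send_something_with_modifier` rewriting a datagram in flight; `Datagram::new` is called with the same five arguments that `set_payload` uses in the idle/close tests further down, and nothing here is asserted to be in the patch itself.

// Sketch: send one stream datagram from `client`, but flip a bit in its last
// byte (inside the AEAD tag) before handing it on, e.g. to provoke a
// decryption failure at the receiver.
fn send_something_corrupted(client: &mut Connection) -> Datagram {
    send_something_with_modifier(client, now(), |d| {
        let mut bytes = d[..].to_vec();
        *bytes.last_mut().unwrap() ^= 0x80;
        Some(Datagram::new(
            d.source(),
            d.destination(),
            d.tos(),
            d.ttl(),
            bytes,
        ))
    })
}
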
@ -4,9 +4,9 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![cfg(feature = "fuzzing")]
#![cfg(feature = "disable-encryption")]

use neqo_crypto::FIXED_TAG_FUZZING;
use neqo_crypto::aead_null::AEAD_NULL_TAG;
use test_fixture::now;

use super::{connect_force_idle, default_client, default_server};

@ -24,7 +24,7 @@ fn no_encryption() {

client.stream_send(stream_id, DATA_CLIENT).unwrap();
let client_pkt = client.process_output(now()).dgram().unwrap();
assert!(client_pkt[..client_pkt.len() - FIXED_TAG_FUZZING.len()].ends_with(DATA_CLIENT));
assert!(client_pkt[..client_pkt.len() - AEAD_NULL_TAG.len()].ends_with(DATA_CLIENT));

server.process_input(&client_pkt, now());
let mut buf = vec![0; 100];

@ -33,7 +33,7 @@ fn no_encryption() {
assert_eq!(&buf[..len], DATA_CLIENT);
server.stream_send(stream_id, DATA_SERVER).unwrap();
let server_pkt = server.process_output(now()).dgram().unwrap();
assert!(server_pkt[..server_pkt.len() - FIXED_TAG_FUZZING.len()].ends_with(DATA_SERVER));
assert!(server_pkt[..server_pkt.len() - AEAD_NULL_TAG.len()].ends_with(DATA_SERVER));

client.process_input(&server_pkt, now());
let (len, _) = client.stream_recv(stream_id, &mut buf).unwrap();

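Spelled out as a hedged sketch (the helper below is illustrative, not in the patch): with the renamed `disable-encryption` feature the null AEAD leaves the payload in the clear and appends the fixed `AEAD_NULL_TAG`, which is exactly what the two assertions above check.

use neqo_crypto::aead_null::AEAD_NULL_TAG;

// True if `packet` ends with `expected` followed only by the null-AEAD tag.
fn cleartext_ends_with(packet: &[u8], expected: &[u8]) -> bool {
    packet.len() >= AEAD_NULL_TAG.len()
        && packet[..packet.len() - AEAD_NULL_TAG.len()].ends_with(expected)
}
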
@ -116,12 +116,6 @@ fn transfer() {
assert!(fin3);
}

#[derive(PartialEq, Eq, PartialOrd, Ord)]
struct IdEntry {
sendorder: StreamOrder,
stream_id: StreamId,
}

// tests stream sendorder priorization
fn sendorder_test(order_of_sendorder: &[Option<SendOrder>]) {
let mut client = default_client();

third_party/rust/neqo-transport/src/crypto.rs (vendored, 96 changed lines)

@ -69,7 +69,6 @@ impl Crypto {
|
|||
mut agent: Agent,
|
||||
protocols: Vec<String>,
|
||||
tphandler: TpHandler,
|
||||
fuzzing: bool,
|
||||
) -> Res<Self> {
|
||||
agent.set_version_range(TLS_VERSION_1_3, TLS_VERSION_1_3)?;
|
||||
agent.set_ciphers(&[
|
||||
|
|
@ -102,7 +101,6 @@ impl Crypto {
|
|||
tls: agent,
|
||||
streams: CryptoStreams::default(),
|
||||
states: CryptoStates {
|
||||
fuzzing,
|
||||
..CryptoStates::default()
|
||||
},
|
||||
})
|
||||
|
|
@ -317,7 +315,7 @@ impl Crypto {
|
|||
}
|
||||
|
||||
pub fn acked(&mut self, token: &CryptoRecoveryToken) {
|
||||
qinfo!(
|
||||
qdebug!(
|
||||
"Acked crypto frame space={} offset={} length={}",
|
||||
token.space,
|
||||
token.offset,
|
||||
|
|
@ -367,7 +365,7 @@ impl Crypto {
|
|||
});
|
||||
enc.encode_vvec(new_token.unwrap_or(&[]));
|
||||
enc.encode(t.as_ref());
|
||||
qinfo!("resumption token {}", hex_snip_middle(enc.as_ref()));
|
||||
qdebug!("resumption token {}", hex_snip_middle(enc.as_ref()));
|
||||
Some(ResumptionToken::new(enc.into(), t.expiration_time()))
|
||||
} else {
|
||||
None
|
||||
|
|
@ -420,7 +418,6 @@ pub struct CryptoDxState {
|
|||
/// The total number of operations that are remaining before the keys
|
||||
/// become exhausted and can't be used any more.
|
||||
invocations: PacketNumber,
|
||||
fuzzing: bool,
|
||||
}
|
||||
|
||||
impl CryptoDxState {
|
||||
|
|
@ -431,9 +428,8 @@ impl CryptoDxState {
|
|||
epoch: Epoch,
|
||||
secret: &SymKey,
|
||||
cipher: Cipher,
|
||||
fuzzing: bool,
|
||||
) -> Self {
|
||||
qinfo!(
|
||||
qdebug!(
|
||||
"Making {:?} {} CryptoDxState, v={:?} cipher={}",
|
||||
direction,
|
||||
epoch,
|
||||
|
|
@ -445,19 +441,11 @@ impl CryptoDxState {
|
|||
version,
|
||||
direction,
|
||||
epoch: usize::from(epoch),
|
||||
aead: Aead::new(
|
||||
fuzzing,
|
||||
TLS_VERSION_1_3,
|
||||
cipher,
|
||||
secret,
|
||||
version.label_prefix(),
|
||||
)
|
||||
.unwrap(),
|
||||
aead: Aead::new(TLS_VERSION_1_3, cipher, secret, version.label_prefix()).unwrap(),
|
||||
hpkey: HpKey::extract(TLS_VERSION_1_3, cipher, secret, &hplabel).unwrap(),
|
||||
used_pn: 0..0,
|
||||
min_pn: 0,
|
||||
invocations: Self::limit(direction, cipher),
|
||||
fuzzing,
|
||||
}
|
||||
}
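For reference, a hedged sketch of the constructor shape after this change: the `fuzzing` boolean is gone from `Aead::new` and from `CryptoDxState`, and the no-op cipher is selected at build time through the `disable-encryption` cargo feature instead. The import paths below are assumptions; the call itself mirrors the ones visible in this diff.

use neqo_crypto::{
    constants::{TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3},
    Aead, SymKey,
};

// Build an AEAD the way the patched code does: version, cipher, secret and
// label prefix, with no fuzzing flag.
fn make_aead_sketch(secret: &SymKey) -> Aead {
    Aead::new(TLS_VERSION_1_3, TLS_AES_128_GCM_SHA256, secret, "quic ").unwrap()
}
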
|
||||
|
||||
|
|
@ -466,7 +454,6 @@ impl CryptoDxState {
|
|||
direction: CryptoDxDirection,
|
||||
label: &str,
|
||||
dcid: &[u8],
|
||||
fuzzing: bool,
|
||||
) -> Self {
|
||||
qtrace!("new_initial {:?} {}", version, ConnectionIdRef::from(dcid));
|
||||
let salt = version.initial_salt();
|
||||
|
|
@ -482,14 +469,7 @@ impl CryptoDxState {
|
|||
let secret =
|
||||
hkdf::expand_label(TLS_VERSION_1_3, cipher, &initial_secret, &[], label).unwrap();
|
||||
|
||||
Self::new(
|
||||
version,
|
||||
direction,
|
||||
TLS_EPOCH_INITIAL,
|
||||
&secret,
|
||||
cipher,
|
||||
fuzzing,
|
||||
)
|
||||
Self::new(version, direction, TLS_EPOCH_INITIAL, &secret, cipher)
|
||||
}
|
||||
|
||||
/// Determine the confidentiality and integrity limits for the cipher.
|
||||
|
|
@ -549,7 +529,6 @@ impl CryptoDxState {
|
|||
direction: self.direction,
|
||||
epoch: self.epoch + 1,
|
||||
aead: Aead::new(
|
||||
self.fuzzing,
|
||||
TLS_VERSION_1_3,
|
||||
cipher,
|
||||
next_secret,
|
||||
|
|
@ -560,7 +539,6 @@ impl CryptoDxState {
|
|||
used_pn: pn..pn,
|
||||
min_pn: pn,
|
||||
invocations,
|
||||
fuzzing: self.fuzzing,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -696,7 +674,7 @@ impl CryptoDxState {
|
|||
Ok(res.to_vec())
|
||||
}
|
||||
|
||||
#[cfg(all(test, not(feature = "fuzzing")))]
|
||||
#[cfg(all(test, not(feature = "disable-encryption")))]
|
||||
pub(crate) fn test_default() -> Self {
|
||||
// This matches the value in packet.rs
|
||||
const CLIENT_CID: &[u8] = &[0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08];
|
||||
|
|
@ -705,7 +683,6 @@ impl CryptoDxState {
|
|||
CryptoDxDirection::Write,
|
||||
"server in",
|
||||
CLIENT_CID,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
|
|
@ -759,7 +736,6 @@ pub(crate) struct CryptoDxAppData {
|
|||
cipher: Cipher,
|
||||
// Not the secret used to create `self.dx`, but the one needed for the next iteration.
|
||||
next_secret: SymKey,
|
||||
fuzzing: bool,
|
||||
}
|
||||
|
||||
impl CryptoDxAppData {
|
||||
|
|
@ -768,20 +744,11 @@ impl CryptoDxAppData {
|
|||
dir: CryptoDxDirection,
|
||||
secret: &SymKey,
|
||||
cipher: Cipher,
|
||||
fuzzing: bool,
|
||||
) -> Res<Self> {
|
||||
Ok(Self {
|
||||
dx: CryptoDxState::new(
|
||||
version,
|
||||
dir,
|
||||
TLS_EPOCH_APPLICATION_DATA,
|
||||
secret,
|
||||
cipher,
|
||||
fuzzing,
|
||||
),
|
||||
dx: CryptoDxState::new(version, dir, TLS_EPOCH_APPLICATION_DATA, secret, cipher),
|
||||
cipher,
|
||||
next_secret: Self::update_secret(cipher, secret)?,
|
||||
fuzzing,
|
||||
})
|
||||
}
|
||||
|
||||
|
|
@ -800,7 +767,6 @@ impl CryptoDxAppData {
|
|||
dx: self.dx.next(&self.next_secret, self.cipher),
|
||||
cipher: self.cipher,
|
||||
next_secret,
|
||||
fuzzing: self.fuzzing,
|
||||
})
|
||||
}
|
||||
|
||||
|
|
@ -834,7 +800,6 @@ pub struct CryptoStates {
|
|||
// If this is set, then we have noticed a genuine update.
|
||||
// Once this time passes, we should switch in new keys.
|
||||
read_update_time: Option<Instant>,
|
||||
fuzzing: bool,
|
||||
}
|
||||
|
||||
impl CryptoStates {
|
||||
|
|
@ -980,7 +945,7 @@ impl CryptoStates {
|
|||
};
|
||||
|
||||
for v in versions {
|
||||
qinfo!(
|
||||
qdebug!(
|
||||
[self],
|
||||
"Creating initial cipher state v={:?}, role={:?} dcid={}",
|
||||
v,
|
||||
|
|
@ -989,20 +954,8 @@ impl CryptoStates {
|
|||
);
|
||||
|
||||
let mut initial = CryptoState {
|
||||
tx: CryptoDxState::new_initial(
|
||||
*v,
|
||||
CryptoDxDirection::Write,
|
||||
write,
|
||||
dcid,
|
||||
self.fuzzing,
|
||||
),
|
||||
rx: CryptoDxState::new_initial(
|
||||
*v,
|
||||
CryptoDxDirection::Read,
|
||||
read,
|
||||
dcid,
|
||||
self.fuzzing,
|
||||
),
|
||||
tx: CryptoDxState::new_initial(*v, CryptoDxDirection::Write, write, dcid),
|
||||
rx: CryptoDxState::new_initial(*v, CryptoDxDirection::Read, read, dcid),
|
||||
};
|
||||
if let Some(prev) = self.initials.get(v) {
|
||||
qinfo!(
|
||||
|
|
@ -1056,7 +1009,6 @@ impl CryptoStates {
|
|||
TLS_EPOCH_ZERO_RTT,
|
||||
secret,
|
||||
cipher,
|
||||
self.fuzzing,
|
||||
));
|
||||
}
|
||||
|
||||
|
|
@ -1097,7 +1049,6 @@ impl CryptoStates {
|
|||
TLS_EPOCH_HANDSHAKE,
|
||||
write_secret,
|
||||
cipher,
|
||||
self.fuzzing,
|
||||
),
|
||||
rx: CryptoDxState::new(
|
||||
version,
|
||||
|
|
@ -1105,7 +1056,6 @@ impl CryptoStates {
|
|||
TLS_EPOCH_HANDSHAKE,
|
||||
read_secret,
|
||||
cipher,
|
||||
self.fuzzing,
|
||||
),
|
||||
});
|
||||
}
|
||||
|
|
@ -1113,13 +1063,7 @@ impl CryptoStates {
|
|||
pub fn set_application_write_key(&mut self, version: Version, secret: &SymKey) -> Res<()> {
|
||||
debug_assert!(self.app_write.is_none());
|
||||
debug_assert_ne!(self.cipher, 0);
|
||||
let mut app = CryptoDxAppData::new(
|
||||
version,
|
||||
CryptoDxDirection::Write,
|
||||
secret,
|
||||
self.cipher,
|
||||
self.fuzzing,
|
||||
)?;
|
||||
let mut app = CryptoDxAppData::new(version, CryptoDxDirection::Write, secret, self.cipher)?;
|
||||
if let Some(z) = &self.zero_rtt {
|
||||
if z.direction == CryptoDxDirection::Write {
|
||||
app.dx.continuation(z)?;
|
||||
|
|
@ -1138,13 +1082,7 @@ impl CryptoStates {
|
|||
) -> Res<()> {
|
||||
debug_assert!(self.app_write.is_some(), "should have write keys installed");
|
||||
debug_assert!(self.app_read.is_none());
|
||||
let mut app = CryptoDxAppData::new(
|
||||
version,
|
||||
CryptoDxDirection::Read,
|
||||
secret,
|
||||
self.cipher,
|
||||
self.fuzzing,
|
||||
)?;
|
||||
let mut app = CryptoDxAppData::new(version, CryptoDxDirection::Read, secret, self.cipher)?;
|
||||
if let Some(z) = &self.zero_rtt {
|
||||
if z.direction == CryptoDxDirection::Read {
|
||||
app.dx.continuation(z)?;
|
||||
|
|
@ -1286,7 +1224,7 @@ impl CryptoStates {
|
|||
}
|
||||
|
||||
/// Make some state for removing protection in tests.
|
||||
#[cfg(not(feature = "fuzzing"))]
|
||||
#[cfg(not(feature = "disable-encryption"))]
|
||||
#[cfg(test)]
|
||||
pub(crate) fn test_default() -> Self {
|
||||
let read = |epoch| {
|
||||
|
|
@ -1299,7 +1237,6 @@ impl CryptoStates {
|
|||
dx: read(epoch),
|
||||
cipher: TLS_AES_128_GCM_SHA256,
|
||||
next_secret: hkdf::import_key(TLS_VERSION_1_3, &[0xaa; 32]).unwrap(),
|
||||
fuzzing: false,
|
||||
};
|
||||
let mut initials = HashMap::new();
|
||||
initials.insert(
|
||||
|
|
@ -1319,11 +1256,10 @@ impl CryptoStates {
|
|||
app_read: Some(app_read(3)),
|
||||
app_read_next: Some(app_read(4)),
|
||||
read_update_time: None,
|
||||
fuzzing: false,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(not(feature = "fuzzing"), test))]
|
||||
#[cfg(all(not(feature = "disable-encryption"), test))]
|
||||
pub(crate) fn test_chacha() -> Self {
|
||||
const SECRET: &[u8] = &[
|
||||
0x9a, 0xc3, 0x12, 0xa7, 0xf8, 0x77, 0x46, 0x8e, 0xbe, 0x69, 0x42, 0x27, 0x48, 0xad,
|
||||
|
|
@ -1337,7 +1273,6 @@ impl CryptoStates {
|
|||
direction: CryptoDxDirection::Read,
|
||||
epoch,
|
||||
aead: Aead::new(
|
||||
false,
|
||||
TLS_VERSION_1_3,
|
||||
TLS_CHACHA20_POLY1305_SHA256,
|
||||
&secret,
|
||||
|
|
@ -1354,11 +1289,9 @@ impl CryptoStates {
|
|||
used_pn: 0..645_971_972,
|
||||
min_pn: 0,
|
||||
invocations: 10,
|
||||
fuzzing: false,
|
||||
},
|
||||
cipher: TLS_CHACHA20_POLY1305_SHA256,
|
||||
next_secret: secret.clone(),
|
||||
fuzzing: false,
|
||||
};
|
||||
Self {
|
||||
initials: HashMap::new(),
|
||||
|
|
@ -1369,7 +1302,6 @@ impl CryptoStates {
|
|||
app_read: Some(app_read(3)),
|
||||
app_read_next: Some(app_read(4)),
|
||||
read_update_time: None,
|
||||
fuzzing: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
third_party/rust/neqo-transport/src/frame.rs (vendored, 86 changed lines)

@ -20,7 +20,7 @@ use crate::{
|
|||
#[allow(clippy::module_name_repetitions)]
|
||||
pub type FrameType = u64;
|
||||
|
||||
const FRAME_TYPE_PADDING: FrameType = 0x0;
|
||||
pub const FRAME_TYPE_PADDING: FrameType = 0x0;
|
||||
pub const FRAME_TYPE_PING: FrameType = 0x1;
|
||||
pub const FRAME_TYPE_ACK: FrameType = 0x2;
|
||||
const FRAME_TYPE_ACK_ECN: FrameType = 0x3;
|
||||
|
|
@ -95,6 +95,12 @@ impl From<ConnectionError> for CloseError {
|
|||
}
|
||||
}
|
||||
|
||||
impl From<std::array::TryFromSliceError> for Error {
|
||||
fn from(_err: std::array::TryFromSliceError) -> Self {
|
||||
Self::FrameEncodingError
|
||||
}
|
||||
}
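A small sketch of what this conversion buys inside `decode()`: fallible slice-to-array conversions can use `?` and surface as `FrameEncodingError` instead of panicking through `unwrap()`, as the stateless reset token path below now does. The helper name is illustrative.

// Borrow a 16-byte stateless reset token out of a decoded slice; a length
// mismatch becomes Error::FrameEncodingError via the From impl above.
fn reset_token(srt: &[u8]) -> Res<&[u8; 16]> {
    Ok(<&[u8; 16]>::try_from(srt)?)
}
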
|
||||
|
||||
#[derive(PartialEq, Eq, Debug, Default, Clone)]
|
||||
pub struct AckRange {
|
||||
pub(crate) gap: u64,
|
||||
|
|
@ -103,7 +109,7 @@ pub struct AckRange {
|
|||
|
||||
#[derive(PartialEq, Eq, Debug, Clone)]
|
||||
pub enum Frame<'a> {
|
||||
Padding,
|
||||
Padding(u16),
|
||||
Ping,
|
||||
Ack {
|
||||
largest_acknowledged: u64,
|
||||
|
|
@ -213,9 +219,10 @@ impl<'a> Frame<'a> {
|
|||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn get_type(&self) -> FrameType {
|
||||
match self {
|
||||
Self::Padding => FRAME_TYPE_PADDING,
|
||||
Self::Padding { .. } => FRAME_TYPE_PADDING,
|
||||
Self::Ping => FRAME_TYPE_PING,
|
||||
Self::Ack { .. } => FRAME_TYPE_ACK, // We don't do ACK ECN.
|
||||
Self::ResetStream { .. } => FRAME_TYPE_RESET_STREAM,
|
||||
|
|
@ -254,6 +261,7 @@ impl<'a> Frame<'a> {
|
|||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn is_stream(&self) -> bool {
|
||||
matches!(
|
||||
self,
|
||||
|
|
@ -269,6 +277,7 @@ impl<'a> Frame<'a> {
|
|||
)
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn stream_type(fin: bool, nonzero_offset: bool, fill: bool) -> u64 {
|
||||
let mut t = FRAME_TYPE_STREAM;
|
||||
if fin {
|
||||
|
|
@ -285,19 +294,21 @@ impl<'a> Frame<'a> {
|
|||
|
||||
/// If the frame causes a recipient to generate an ACK within its
|
||||
/// advertised maximum acknowledgement delay.
|
||||
#[must_use]
|
||||
pub fn ack_eliciting(&self) -> bool {
|
||||
!matches!(
|
||||
self,
|
||||
Self::Ack { .. } | Self::Padding | Self::ConnectionClose { .. }
|
||||
Self::Ack { .. } | Self::Padding { .. } | Self::ConnectionClose { .. }
|
||||
)
|
||||
}
|
||||
|
||||
/// If the frame can be sent in a path probe
|
||||
/// without initiating migration to that path.
|
||||
#[must_use]
|
||||
pub fn path_probing(&self) -> bool {
|
||||
matches!(
|
||||
self,
|
||||
Self::Padding
|
||||
Self::Padding { .. }
|
||||
| Self::NewConnectionId { .. }
|
||||
| Self::PathChallenge { .. }
|
||||
| Self::PathResponse { .. }
|
||||
|
|
@ -307,6 +318,10 @@ impl<'a> Frame<'a> {
|
|||
/// Converts `AckRanges` as encoded in a ACK frame (see -transport
|
||||
/// 19.3.1) into ranges of acked packets (end, start), inclusive of
|
||||
/// start and end values.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns an error if the ranges are invalid.
|
||||
pub fn decode_ack_frame(
|
||||
largest_acked: u64,
|
||||
first_ack_range: u64,
|
||||
|
|
@ -347,36 +362,36 @@ impl<'a> Frame<'a> {
|
|||
Ok(acked_ranges)
|
||||
}
|
||||
|
||||
pub fn dump(&self) -> Option<String> {
|
||||
#[must_use]
|
||||
pub fn dump(&self) -> String {
|
||||
match self {
|
||||
Self::Crypto { offset, data } => Some(format!(
|
||||
"Crypto {{ offset: {}, len: {} }}",
|
||||
offset,
|
||||
data.len()
|
||||
)),
|
||||
Self::Crypto { offset, data } => {
|
||||
format!("Crypto {{ offset: {}, len: {} }}", offset, data.len())
|
||||
}
|
||||
Self::Stream {
|
||||
stream_id,
|
||||
offset,
|
||||
fill,
|
||||
data,
|
||||
fin,
|
||||
} => Some(format!(
|
||||
} => format!(
|
||||
"Stream {{ stream_id: {}, offset: {}, len: {}{}, fin: {} }}",
|
||||
stream_id.as_u64(),
|
||||
offset,
|
||||
if *fill { ">>" } else { "" },
|
||||
data.len(),
|
||||
fin,
|
||||
)),
|
||||
Self::Padding => None,
|
||||
Self::Datagram { data, .. } => Some(format!("Datagram {{ len: {} }}", data.len())),
|
||||
_ => Some(format!("{self:?}")),
|
||||
),
|
||||
Self::Padding(length) => format!("Padding {{ len: {length} }}"),
|
||||
Self::Datagram { data, .. } => format!("Datagram {{ len: {} }}", data.len()),
|
||||
_ => format!("{self:?}"),
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn is_allowed(&self, pt: PacketType) -> bool {
|
||||
match self {
|
||||
Self::Padding | Self::Ping => true,
|
||||
Self::Padding { .. } | Self::Ping => true,
|
||||
Self::Crypto { .. }
|
||||
| Self::Ack { .. }
|
||||
| Self::ConnectionClose {
|
||||
|
|
@ -388,6 +403,9 @@ impl<'a> Frame<'a> {
|
|||
}
|
||||
}
|
||||
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns an error if the frame cannot be decoded.
|
||||
#[allow(clippy::too_many_lines)] // Yeah, but it's a nice match statement.
|
||||
pub fn decode(dec: &mut Decoder<'a>) -> Res<Self> {
|
||||
/// Maximum ACK Range Count in ACK Frame
|
||||
|
|
@ -409,13 +427,23 @@ impl<'a> Frame<'a> {
|
|||
}
|
||||
|
||||
// TODO(ekr@rtfm.com): check for minimal encoding
|
||||
let t = d(dec.decode_varint())?;
|
||||
let t = dv(dec)?;
|
||||
match t {
|
||||
FRAME_TYPE_PADDING => Ok(Self::Padding),
|
||||
FRAME_TYPE_PADDING => {
|
||||
let mut length: u16 = 1;
|
||||
while let Some(b) = dec.peek_byte() {
|
||||
if u64::from(b) != FRAME_TYPE_PADDING {
|
||||
break;
|
||||
}
|
||||
length += 1;
|
||||
dec.skip(1);
|
||||
}
|
||||
Ok(Self::Padding(length))
|
||||
}
|
||||
FRAME_TYPE_PING => Ok(Self::Ping),
|
||||
FRAME_TYPE_RESET_STREAM => Ok(Self::ResetStream {
|
||||
stream_id: StreamId::from(dv(dec)?),
|
||||
application_error_code: d(dec.decode_varint())?,
|
||||
application_error_code: dv(dec)?,
|
||||
final_size: match dec.decode_varint() {
|
||||
Some(v) => v,
|
||||
_ => return Err(Error::NoMoreData),
|
||||
|
|
@ -457,12 +485,12 @@ impl<'a> Frame<'a> {
|
|||
}
|
||||
FRAME_TYPE_STOP_SENDING => Ok(Self::StopSending {
|
||||
stream_id: StreamId::from(dv(dec)?),
|
||||
application_error_code: d(dec.decode_varint())?,
|
||||
application_error_code: dv(dec)?,
|
||||
}),
|
||||
FRAME_TYPE_CRYPTO => {
|
||||
let offset = dv(dec)?;
|
||||
let data = d(dec.decode_vvec())?;
|
||||
if offset + u64::try_from(data.len()).unwrap() > ((1 << 62) - 1) {
|
||||
if offset + u64::try_from(data.len())? > ((1 << 62) - 1) {
|
||||
return Err(Error::FrameEncodingError);
|
||||
}
|
||||
Ok(Self::Crypto { offset, data })
|
||||
|
|
@ -489,7 +517,7 @@ impl<'a> Frame<'a> {
|
|||
qtrace!("STREAM frame, with length");
|
||||
d(dec.decode_vvec())?
|
||||
};
|
||||
if o + u64::try_from(data.len()).unwrap() > ((1 << 62) - 1) {
|
||||
if o + u64::try_from(data.len())? > ((1 << 62) - 1) {
|
||||
return Err(Error::FrameEncodingError);
|
||||
}
|
||||
Ok(Self::Stream {
|
||||
|
|
@ -538,7 +566,7 @@ impl<'a> Frame<'a> {
|
|||
return Err(Error::DecodingFrame);
|
||||
}
|
||||
let srt = d(dec.decode(16))?;
|
||||
let stateless_reset_token = <&[_; 16]>::try_from(srt).unwrap();
|
||||
let stateless_reset_token = <&[_; 16]>::try_from(srt)?;
|
||||
|
||||
Ok(Self::NewConnectionId {
|
||||
sequence_number,
|
||||
|
|
@ -563,7 +591,7 @@ impl<'a> Frame<'a> {
|
|||
Ok(Self::PathResponse { data: datav })
|
||||
}
|
||||
FRAME_TYPE_CONNECTION_CLOSE_TRANSPORT | FRAME_TYPE_CONNECTION_CLOSE_APPLICATION => {
|
||||
let error_code = CloseError::from_type_bit(t, d(dec.decode_varint())?);
|
||||
let error_code = CloseError::from_type_bit(t, dv(dec)?);
|
||||
let frame_type = if t == FRAME_TYPE_CONNECTION_CLOSE_TRANSPORT {
|
||||
dv(dec)?
|
||||
} else {
|
||||
|
|
@ -631,8 +659,10 @@ mod tests {
|
|||
|
||||
#[test]
|
||||
fn padding() {
|
||||
let f = Frame::Padding;
|
||||
let f = Frame::Padding(1);
|
||||
just_dec(&f, "00");
|
||||
let f = Frame::Padding(2);
|
||||
just_dec(&f, "0000");
|
||||
}
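To make the new decoder behaviour concrete, a sketch in the style of the tests in this module (assumed, not part of the patch): a run of 0x00 bytes now parses as a single `Padding(n)` frame whose length counts the run, rather than as n separate frames.

#[test]
fn padding_runs_coalesce() {
    // Three PADDING bytes followed by a PING frame.
    let mut dec = Decoder::from(&[0x00, 0x00, 0x00, 0x01][..]);
    assert_eq!(Frame::decode(&mut dec).unwrap(), Frame::Padding(3));
    assert_eq!(Frame::decode(&mut dec).unwrap(), Frame::Ping);
}
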
|
||||
|
||||
#[test]
|
||||
|
|
@ -888,8 +918,8 @@ mod tests {
|
|||
|
||||
#[test]
|
||||
fn test_compare() {
|
||||
let f1 = Frame::Padding;
|
||||
let f2 = Frame::Padding;
|
||||
let f1 = Frame::Padding(1);
|
||||
let f2 = Frame::Padding(1);
|
||||
let f3 = Frame::Crypto {
|
||||
offset: 0,
|
||||
data: &[1, 2, 3],
|
||||
|
|
|
third_party/rust/neqo-transport/src/lib.rs (vendored, 4 changed lines)

@ -6,7 +6,7 @@

#![allow(clippy::module_name_repetitions)] // This lint doesn't work here.

use neqo_common::qinfo;
use neqo_common::qwarn;
use neqo_crypto::Error as CryptoError;

mod ackrate;

@ -165,7 +165,7 @@ impl Error {

impl From<CryptoError> for Error {
fn from(err: CryptoError) -> Self {
qinfo!("Crypto operation failed {:?}", err);
qwarn!("Crypto operation failed {:?}", err);
match err {
CryptoError::EchRetry(config) => Self::EchRetry(config),
_ => Self::CryptoError(err),

@ -18,6 +18,7 @@ use neqo_crypto::random;
|
|||
use crate::{
|
||||
cid::{ConnectionId, ConnectionIdDecoder, ConnectionIdRef, MAX_CONNECTION_ID_LEN},
|
||||
crypto::{CryptoDxState, CryptoSpace, CryptoStates},
|
||||
frame::FRAME_TYPE_PADDING,
|
||||
version::{Version, WireVersion},
|
||||
Error, Res,
|
||||
};
|
||||
|
|
@ -255,9 +256,14 @@ impl PacketBuilder {
|
|||
/// Maybe pad with "PADDING" frames.
|
||||
/// Only does so if padding was needed and this is a short packet.
|
||||
/// Returns true if padding was added.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Cannot happen.
|
||||
pub fn pad(&mut self) -> bool {
|
||||
if self.padding && !self.is_long() {
|
||||
self.encoder.pad_to(self.limit, 0);
|
||||
self.encoder
|
||||
.pad_to(self.limit, FRAME_TYPE_PADDING.try_into().unwrap());
|
||||
true
|
||||
} else {
|
||||
false
|
||||
|
|
@ -288,6 +294,10 @@ impl PacketBuilder {
|
|||
/// The length is filled in after calling `build`.
|
||||
/// Does nothing if there isn't 4 bytes available other than render this builder
|
||||
/// unusable; if `remaining()` returns 0 at any point, call `abort()`.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This will panic if the packet number length is too large.
|
||||
pub fn pn(&mut self, pn: PacketNumber, pn_len: usize) {
|
||||
if self.remaining() < 4 {
|
||||
self.limit = 0;
|
||||
|
|
@ -352,6 +362,10 @@ impl PacketBuilder {
|
|||
}
|
||||
|
||||
/// Build the packet and return the encoder.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// This will return an error if the packet is too large.
|
||||
pub fn build(mut self, crypto: &mut CryptoDxState) -> Res<Encoder> {
|
||||
if self.len() > self.limit {
|
||||
qwarn!("Packet contents are more than the limit");
|
||||
|
|
@ -376,7 +390,9 @@ impl PacketBuilder {
|
|||
|
||||
// Calculate the mask.
|
||||
let offset = SAMPLE_OFFSET - self.offsets.pn.len();
|
||||
assert!(offset + SAMPLE_SIZE <= ciphertext.len());
|
||||
if offset + SAMPLE_SIZE > ciphertext.len() {
|
||||
return Err(Error::InternalError);
|
||||
}
|
||||
let sample = &ciphertext[offset..offset + SAMPLE_SIZE];
|
||||
let mask = crypto.compute_mask(sample)?;
|
||||
|
||||
|
|
@ -410,6 +426,10 @@ impl PacketBuilder {
|
|||
/// As this is a simple packet, this is just an associated function.
|
||||
/// As Retry is odd (it has to be constructed with leading bytes),
|
||||
/// this returns a [`Vec<u8>`] rather than building on an encoder.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// This will return an error if AEAD encrypt fails.
|
||||
#[allow(clippy::similar_names)] // scid and dcid are fine here.
|
||||
pub fn retry(
|
||||
version: Version,
|
||||
|
|
@ -443,6 +463,7 @@ impl PacketBuilder {
|
|||
|
||||
/// Make a Version Negotiation packet.
|
||||
#[allow(clippy::similar_names)] // scid and dcid are fine here.
|
||||
#[must_use]
|
||||
pub fn version_negotiation(
|
||||
dcid: &[u8],
|
||||
scid: &[u8],
|
||||
|
|
@ -554,6 +575,10 @@ impl<'a> PublicPacket<'a> {
|
|||
|
||||
/// Decode the common parts of a packet. This provides minimal parsing and validation.
|
||||
/// Returns a tuple of a `PublicPacket` and a slice with any remainder from the datagram.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// This will return an error if the packet could not be decoded.
|
||||
#[allow(clippy::similar_names)] // For dcid and scid, which are fine.
|
||||
pub fn decode(data: &'a [u8], dcid_decoder: &dyn ConnectionIdDecoder) -> Res<(Self, &'a [u8])> {
|
||||
let mut decoder = Decoder::new(data);
|
||||
|
|
@ -585,7 +610,7 @@ impl<'a> PublicPacket<'a> {
|
|||
}
|
||||
|
||||
// Generic long header.
|
||||
let version = WireVersion::try_from(Self::opt(decoder.decode_uint(4))?).unwrap();
|
||||
let version = WireVersion::try_from(Self::opt(decoder.decode_uint(4))?)?;
|
||||
let dcid = ConnectionIdRef::from(Self::opt(decoder.decode_vec(1))?);
|
||||
let scid = ConnectionIdRef::from(Self::opt(decoder.decode_vec(1))?);
|
||||
|
||||
|
|
@ -645,11 +670,14 @@ impl<'a> PublicPacket<'a> {
|
|||
}
|
||||
|
||||
/// Validate the given packet as though it were a retry.
|
||||
#[must_use]
|
||||
pub fn is_valid_retry(&self, odcid: &ConnectionId) -> bool {
|
||||
if self.packet_type != PacketType::Retry {
|
||||
return false;
|
||||
}
|
||||
let version = self.version().unwrap();
|
||||
let Some(version) = self.version() else {
|
||||
return false;
|
||||
};
|
||||
let expansion = retry::expansion(version);
|
||||
if self.data.len() <= expansion {
|
||||
return false;
|
||||
|
|
@ -665,6 +693,7 @@ impl<'a> PublicPacket<'a> {
|
|||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn is_valid_initial(&self) -> bool {
|
||||
// Packet has to be an initial, with a DCID of 8 bytes, or a token.
|
||||
// Note: the Server class validates the token and checks the length.
|
||||
|
|
@ -672,32 +701,42 @@ impl<'a> PublicPacket<'a> {
|
|||
&& (self.dcid().len() >= 8 || !self.token.is_empty())
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn packet_type(&self) -> PacketType {
|
||||
self.packet_type
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn dcid(&self) -> ConnectionIdRef<'a> {
|
||||
self.dcid
|
||||
}
|
||||
|
||||
/// # Panics
|
||||
///
|
||||
/// This will panic if called for a short header packet.
|
||||
#[must_use]
|
||||
pub fn scid(&self) -> ConnectionIdRef<'a> {
|
||||
self.scid
|
||||
.expect("should only be called for long header packets")
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn token(&self) -> &'a [u8] {
|
||||
self.token
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn version(&self) -> Option<Version> {
|
||||
self.version.and_then(|v| Version::try_from(v).ok())
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn wire_version(&self) -> WireVersion {
|
||||
debug_assert!(self.version.is_some());
|
||||
self.version.unwrap_or(0)
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn len(&self) -> usize {
|
||||
self.data.len()
|
||||
}
|
||||
|
|
@ -725,14 +764,10 @@ impl<'a> PublicPacket<'a> {
|
|||
assert_ne!(self.packet_type, PacketType::Retry);
|
||||
assert_ne!(self.packet_type, PacketType::VersionNegotiation);
|
||||
|
||||
qtrace!(
|
||||
"unmask hdr={}",
|
||||
hex(&self.data[..self.header_len + SAMPLE_OFFSET])
|
||||
);
|
||||
|
||||
let sample_offset = self.header_len + SAMPLE_OFFSET;
|
||||
let mask = if let Some(sample) = self.data.get(sample_offset..(sample_offset + SAMPLE_SIZE))
|
||||
{
|
||||
qtrace!("unmask hdr={}", hex(&self.data[..sample_offset]));
|
||||
crypto.compute_mask(sample)
|
||||
} else {
|
||||
Err(Error::NoMoreData)
|
||||
|
|
@ -776,6 +811,9 @@ impl<'a> PublicPacket<'a> {
|
|||
))
|
||||
}
|
||||
|
||||
/// # Errors
|
||||
///
|
||||
/// This will return an error if the packet cannot be decrypted.
|
||||
pub fn decrypt(&self, crypto: &mut CryptoStates, release_at: Instant) -> Res<DecryptedPacket> {
|
||||
let cspace: CryptoSpace = self.packet_type.into();
|
||||
// When we don't have a version, the crypto code doesn't need a version
|
||||
|
|
@ -790,7 +828,9 @@ impl<'a> PublicPacket<'a> {
|
|||
// too small (which is public information).
|
||||
let (key_phase, pn, header, body) = self.decrypt_header(rx)?;
|
||||
qtrace!([rx], "decoded header: {:?}", header);
|
||||
let rx = crypto.rx(version, cspace, key_phase).unwrap();
|
||||
let Some(rx) = crypto.rx(version, cspace, key_phase) else {
|
||||
return Err(Error::DecryptError);
|
||||
};
|
||||
let version = rx.version(); // Version fixup; see above.
|
||||
let d = rx.decrypt(pn, &header, body)?;
|
||||
// If this is the first packet ever successfully decrypted
|
||||
|
|
@ -813,8 +853,14 @@ impl<'a> PublicPacket<'a> {
|
|||
}
|
||||
}
|
||||
|
||||
/// # Errors
|
||||
///
|
||||
/// This will return an error if the packet is not a version negotiation packet
|
||||
/// or if the versions cannot be decoded.
|
||||
pub fn supported_versions(&self) -> Res<Vec<WireVersion>> {
|
||||
assert_eq!(self.packet_type, PacketType::VersionNegotiation);
|
||||
if self.packet_type != PacketType::VersionNegotiation {
|
||||
return Err(Error::InvalidPacket);
|
||||
}
|
||||
let mut decoder = Decoder::new(&self.data[self.header_len..]);
|
||||
let mut res = Vec::new();
|
||||
while decoder.remaining() > 0 {
|
||||
|
|
@ -845,14 +891,17 @@ pub struct DecryptedPacket {
|
|||
}
|
||||
|
||||
impl DecryptedPacket {
|
||||
#[must_use]
|
||||
pub fn version(&self) -> Version {
|
||||
self.version
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn packet_type(&self) -> PacketType {
|
||||
self.pt
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn pn(&self) -> PacketNumber {
|
||||
self.pn
|
||||
}
|
||||
|
|
@ -866,7 +915,7 @@ impl Deref for DecryptedPacket {
|
|||
}
|
||||
}
|
||||
|
||||
#[cfg(all(test, not(feature = "fuzzing")))]
|
||||
#[cfg(all(test, not(feature = "disable-encryption")))]
|
||||
mod tests {
|
||||
use neqo_common::Encoder;
|
||||
use test_fixture::{fixture_init, now};
|
||||
|
|
|
|||
|
|
@ -18,7 +18,6 @@ fn make_aead(version: Version) -> Aead {

let secret = hkdf::import_key(TLS_VERSION_1_3, version.retry_secret()).unwrap();
Aead::new(
false,
TLS_VERSION_1_3,
TLS_AES_128_GCM_SHA256,
&secret,

third_party/rust/neqo-transport/src/path.rs (vendored, 2 changed lines)

@ -216,7 +216,7 @@ impl Paths {
/// to a migration from a peer, in which case the old path needs to be probed.
#[must_use]
fn select_primary(&mut self, path: &PathRef) -> Option<PathRef> {
qinfo!([path.borrow()], "set as primary path");
qdebug!([path.borrow()], "set as primary path");
let old_path = self.primary.replace(Rc::clone(path)).map(|old| {
old.borrow_mut().set_primary(false);
old

third_party/rust/neqo-transport/src/qlog.rs (vendored, 349 changed lines)

@ -195,7 +195,7 @@ pub fn packet_sent(
|
|||
) {
|
||||
qlog.add_event_with_stream(|stream| {
|
||||
let mut d = Decoder::from(body);
|
||||
let header = PacketHeader::with_type(to_qlog_pkt_type(pt), Some(pn), None, None, None);
|
||||
let header = PacketHeader::with_type(pt.into(), Some(pn), None, None, None);
|
||||
let raw = RawInfo {
|
||||
length: Some(plen as u64),
|
||||
payload_length: None,
|
||||
|
|
@ -205,7 +205,7 @@ pub fn packet_sent(
|
|||
let mut frames = SmallVec::new();
|
||||
while d.remaining() > 0 {
|
||||
if let Ok(f) = Frame::decode(&mut d) {
|
||||
frames.push(frame_to_qlogframe(&f));
|
||||
frames.push(QuicFrame::from(&f));
|
||||
} else {
|
||||
qinfo!("qlog: invalid frame");
|
||||
break;
|
||||
|
|
@ -231,13 +231,8 @@ pub fn packet_sent(
|
|||
|
||||
pub fn packet_dropped(qlog: &mut NeqoQlog, public_packet: &PublicPacket) {
|
||||
qlog.add_event_data(|| {
|
||||
let header = PacketHeader::with_type(
|
||||
to_qlog_pkt_type(public_packet.packet_type()),
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
);
|
||||
let header =
|
||||
PacketHeader::with_type(public_packet.packet_type().into(), None, None, None, None);
|
||||
let raw = RawInfo {
|
||||
length: Some(public_packet.len() as u64),
|
||||
payload_length: None,
|
||||
|
|
@ -259,8 +254,7 @@ pub fn packet_dropped(qlog: &mut NeqoQlog, public_packet: &PublicPacket) {
|
|||
pub fn packets_lost(qlog: &mut NeqoQlog, pkts: &[SentPacket]) {
|
||||
qlog.add_event_with_stream(|stream| {
|
||||
for pkt in pkts {
|
||||
let header =
|
||||
PacketHeader::with_type(to_qlog_pkt_type(pkt.pt), Some(pkt.pn), None, None, None);
|
||||
let header = PacketHeader::with_type(pkt.pt.into(), Some(pkt.pn), None, None, None);
|
||||
|
||||
let ev_data = EventData::PacketLost(PacketLost {
|
||||
header: Some(header),
|
||||
|
|
@ -283,7 +277,7 @@ pub fn packet_received(
|
|||
let mut d = Decoder::from(&payload[..]);
|
||||
|
||||
let header = PacketHeader::with_type(
|
||||
to_qlog_pkt_type(public_packet.packet_type()),
|
||||
public_packet.packet_type().into(),
|
||||
Some(payload.pn()),
|
||||
None,
|
||||
None,
|
||||
|
|
@ -299,7 +293,7 @@ pub fn packet_received(
|
|||
|
||||
while d.remaining() > 0 {
|
||||
if let Ok(f) = Frame::decode(&mut d) {
|
||||
frames.push(frame_to_qlogframe(&f));
|
||||
frames.push(QuicFrame::from(&f));
|
||||
} else {
|
||||
qinfo!("qlog: invalid frame");
|
||||
break;
|
||||
|
|
@ -393,173 +387,180 @@ pub fn metrics_updated(qlog: &mut NeqoQlog, updated_metrics: &[QlogMetric]) {
|
|||
|
||||
#[allow(clippy::too_many_lines)] // Yeah, but it's a nice match.
|
||||
#[allow(clippy::cast_possible_truncation, clippy::cast_precision_loss)] // No choice here.
|
||||
fn frame_to_qlogframe(frame: &Frame) -> QuicFrame {
|
||||
match frame {
|
||||
Frame::Padding => QuicFrame::Padding,
|
||||
Frame::Ping => QuicFrame::Ping,
|
||||
Frame::Ack {
|
||||
largest_acknowledged,
|
||||
ack_delay,
|
||||
first_ack_range,
|
||||
ack_ranges,
|
||||
} => {
|
||||
let ranges =
|
||||
Frame::decode_ack_frame(*largest_acknowledged, *first_ack_range, ack_ranges).ok();
|
||||
impl From<&Frame<'_>> for QuicFrame {
|
||||
fn from(frame: &Frame) -> Self {
|
||||
match frame {
|
||||
// TODO: Add payload length to `QuicFrame::Padding` once
|
||||
// https://github.com/cloudflare/quiche/pull/1745 is available via the qlog crate.
|
||||
Frame::Padding { .. } => QuicFrame::Padding,
|
||||
Frame::Ping => QuicFrame::Ping,
|
||||
Frame::Ack {
|
||||
largest_acknowledged,
|
||||
ack_delay,
|
||||
first_ack_range,
|
||||
ack_ranges,
|
||||
} => {
|
||||
let ranges =
|
||||
Frame::decode_ack_frame(*largest_acknowledged, *first_ack_range, ack_ranges)
|
||||
.ok();
|
||||
|
||||
let acked_ranges = ranges.map(|all| {
|
||||
AckedRanges::Double(
|
||||
all.into_iter()
|
||||
.map(RangeInclusive::into_inner)
|
||||
.collect::<Vec<_>>(),
|
||||
)
|
||||
});
|
||||
let acked_ranges = ranges.map(|all| {
|
||||
AckedRanges::Double(
|
||||
all.into_iter()
|
||||
.map(RangeInclusive::into_inner)
|
||||
.collect::<Vec<_>>(),
|
||||
)
|
||||
});
|
||||
|
||||
QuicFrame::Ack {
|
||||
ack_delay: Some(*ack_delay as f32 / 1000.0),
|
||||
acked_ranges,
|
||||
ect1: None,
|
||||
ect0: None,
|
||||
ce: None,
|
||||
QuicFrame::Ack {
|
||||
ack_delay: Some(*ack_delay as f32 / 1000.0),
|
||||
acked_ranges,
|
||||
ect1: None,
|
||||
ect0: None,
|
||||
ce: None,
|
||||
}
|
||||
}
|
||||
Frame::ResetStream {
|
||||
stream_id,
|
||||
application_error_code,
|
||||
final_size,
|
||||
} => QuicFrame::ResetStream {
|
||||
stream_id: stream_id.as_u64(),
|
||||
error_code: *application_error_code,
|
||||
final_size: *final_size,
|
||||
},
|
||||
Frame::StopSending {
|
||||
stream_id,
|
||||
application_error_code,
|
||||
} => QuicFrame::StopSending {
|
||||
stream_id: stream_id.as_u64(),
|
||||
error_code: *application_error_code,
|
||||
},
|
||||
Frame::Crypto { offset, data } => QuicFrame::Crypto {
|
||||
offset: *offset,
|
||||
length: data.len() as u64,
|
||||
},
|
||||
Frame::NewToken { token } => QuicFrame::NewToken {
|
||||
token: qlog::Token {
|
||||
ty: Some(qlog::TokenType::Retry),
|
||||
details: None,
|
||||
raw: Some(RawInfo {
|
||||
data: Some(hex(token)),
|
||||
length: Some(token.len() as u64),
|
||||
payload_length: None,
|
||||
}),
|
||||
},
|
||||
},
|
||||
Frame::Stream {
|
||||
fin,
|
||||
stream_id,
|
||||
offset,
|
||||
data,
|
||||
..
|
||||
} => QuicFrame::Stream {
|
||||
stream_id: stream_id.as_u64(),
|
||||
offset: *offset,
|
||||
length: data.len() as u64,
|
||||
fin: Some(*fin),
|
||||
raw: None,
|
||||
},
|
||||
Frame::MaxData { maximum_data } => QuicFrame::MaxData {
|
||||
maximum: *maximum_data,
|
||||
},
|
||||
Frame::MaxStreamData {
|
||||
stream_id,
|
||||
maximum_stream_data,
|
||||
} => QuicFrame::MaxStreamData {
|
||||
stream_id: stream_id.as_u64(),
|
||||
maximum: *maximum_stream_data,
|
||||
},
|
||||
Frame::MaxStreams {
|
||||
stream_type,
|
||||
maximum_streams,
|
||||
} => QuicFrame::MaxStreams {
|
||||
stream_type: match stream_type {
|
||||
NeqoStreamType::BiDi => StreamType::Bidirectional,
|
||||
NeqoStreamType::UniDi => StreamType::Unidirectional,
|
||||
},
|
||||
maximum: *maximum_streams,
|
||||
},
|
||||
Frame::DataBlocked { data_limit } => QuicFrame::DataBlocked { limit: *data_limit },
|
||||
Frame::StreamDataBlocked {
|
||||
stream_id,
|
||||
stream_data_limit,
|
||||
} => QuicFrame::StreamDataBlocked {
|
||||
stream_id: stream_id.as_u64(),
|
||||
limit: *stream_data_limit,
|
||||
},
|
||||
Frame::StreamsBlocked {
|
||||
stream_type,
|
||||
stream_limit,
|
||||
} => QuicFrame::StreamsBlocked {
|
||||
stream_type: match stream_type {
|
||||
NeqoStreamType::BiDi => StreamType::Bidirectional,
|
||||
NeqoStreamType::UniDi => StreamType::Unidirectional,
|
||||
},
|
||||
limit: *stream_limit,
|
||||
},
|
||||
Frame::NewConnectionId {
|
||||
sequence_number,
|
||||
retire_prior,
|
||||
connection_id,
|
||||
stateless_reset_token,
|
||||
} => QuicFrame::NewConnectionId {
|
||||
sequence_number: *sequence_number as u32,
|
||||
retire_prior_to: *retire_prior as u32,
|
||||
connection_id_length: Some(connection_id.len() as u8),
|
||||
connection_id: hex(connection_id),
|
||||
stateless_reset_token: Some(hex(stateless_reset_token)),
|
||||
},
|
||||
Frame::RetireConnectionId { sequence_number } => QuicFrame::RetireConnectionId {
|
||||
sequence_number: *sequence_number as u32,
|
||||
},
|
||||
Frame::PathChallenge { data } => QuicFrame::PathChallenge {
|
||||
data: Some(hex(data)),
|
||||
},
|
||||
Frame::PathResponse { data } => QuicFrame::PathResponse {
|
||||
data: Some(hex(data)),
|
||||
},
|
||||
Frame::ConnectionClose {
|
||||
error_code,
|
||||
frame_type,
|
||||
reason_phrase,
|
||||
} => QuicFrame::ConnectionClose {
|
||||
error_space: match error_code {
|
||||
CloseError::Transport(_) => Some(ErrorSpace::TransportError),
|
||||
CloseError::Application(_) => Some(ErrorSpace::ApplicationError),
|
||||
},
|
||||
error_code: Some(error_code.code()),
|
||||
error_code_value: Some(0),
|
||||
reason: Some(String::from_utf8_lossy(reason_phrase).to_string()),
|
||||
trigger_frame_type: Some(*frame_type),
|
||||
},
|
||||
Frame::HandshakeDone => QuicFrame::HandshakeDone,
|
||||
Frame::AckFrequency { .. } => QuicFrame::Unknown {
|
||||
frame_type_value: None,
|
||||
raw_frame_type: frame.get_type(),
|
||||
raw: None,
|
||||
},
|
||||
Frame::Datagram { data, .. } => QuicFrame::Datagram {
|
||||
length: data.len() as u64,
|
||||
raw: None,
|
||||
},
|
||||
}
|
||||
Frame::ResetStream {
|
||||
stream_id,
|
||||
application_error_code,
|
||||
final_size,
|
||||
} => QuicFrame::ResetStream {
|
||||
stream_id: stream_id.as_u64(),
|
||||
error_code: *application_error_code,
|
||||
final_size: *final_size,
|
||||
},
|
||||
Frame::StopSending {
|
||||
stream_id,
|
||||
application_error_code,
|
||||
} => QuicFrame::StopSending {
|
||||
stream_id: stream_id.as_u64(),
|
||||
error_code: *application_error_code,
|
||||
},
|
||||
Frame::Crypto { offset, data } => QuicFrame::Crypto {
|
||||
offset: *offset,
|
||||
length: data.len() as u64,
|
||||
},
|
||||
Frame::NewToken { token } => QuicFrame::NewToken {
|
||||
token: qlog::Token {
|
||||
ty: Some(qlog::TokenType::Retry),
|
||||
details: None,
|
||||
raw: Some(RawInfo {
|
||||
data: Some(hex(token)),
|
||||
length: Some(token.len() as u64),
|
||||
payload_length: None,
|
||||
}),
|
||||
},
|
||||
},
|
||||
Frame::Stream {
|
||||
fin,
|
||||
stream_id,
|
||||
offset,
|
||||
data,
|
||||
..
|
||||
} => QuicFrame::Stream {
|
||||
stream_id: stream_id.as_u64(),
|
||||
offset: *offset,
|
||||
length: data.len() as u64,
|
||||
fin: Some(*fin),
|
||||
raw: None,
|
||||
},
|
||||
Frame::MaxData { maximum_data } => QuicFrame::MaxData {
|
||||
maximum: *maximum_data,
|
||||
},
|
||||
Frame::MaxStreamData {
|
||||
stream_id,
|
||||
maximum_stream_data,
|
||||
} => QuicFrame::MaxStreamData {
|
||||
stream_id: stream_id.as_u64(),
|
||||
maximum: *maximum_stream_data,
|
||||
},
|
||||
Frame::MaxStreams {
|
||||
stream_type,
|
||||
maximum_streams,
|
||||
} => QuicFrame::MaxStreams {
|
||||
stream_type: match stream_type {
|
||||
NeqoStreamType::BiDi => StreamType::Bidirectional,
|
||||
NeqoStreamType::UniDi => StreamType::Unidirectional,
|
||||
},
|
||||
maximum: *maximum_streams,
|
||||
},
|
||||
Frame::DataBlocked { data_limit } => QuicFrame::DataBlocked { limit: *data_limit },
|
||||
Frame::StreamDataBlocked {
|
||||
stream_id,
|
||||
stream_data_limit,
|
||||
} => QuicFrame::StreamDataBlocked {
|
||||
stream_id: stream_id.as_u64(),
|
||||
limit: *stream_data_limit,
|
||||
},
|
||||
Frame::StreamsBlocked {
|
||||
stream_type,
|
||||
stream_limit,
|
||||
} => QuicFrame::StreamsBlocked {
|
||||
stream_type: match stream_type {
|
||||
NeqoStreamType::BiDi => StreamType::Bidirectional,
|
||||
NeqoStreamType::UniDi => StreamType::Unidirectional,
|
||||
},
|
||||
limit: *stream_limit,
|
||||
},
|
||||
Frame::NewConnectionId {
|
||||
sequence_number,
|
||||
retire_prior,
|
||||
connection_id,
|
||||
stateless_reset_token,
|
||||
} => QuicFrame::NewConnectionId {
|
||||
sequence_number: *sequence_number as u32,
|
||||
retire_prior_to: *retire_prior as u32,
|
||||
connection_id_length: Some(connection_id.len() as u8),
|
||||
connection_id: hex(connection_id),
|
||||
stateless_reset_token: Some(hex(stateless_reset_token)),
|
||||
},
|
||||
Frame::RetireConnectionId { sequence_number } => QuicFrame::RetireConnectionId {
|
||||
sequence_number: *sequence_number as u32,
|
||||
},
|
||||
Frame::PathChallenge { data } => QuicFrame::PathChallenge {
|
||||
data: Some(hex(data)),
|
||||
},
|
||||
Frame::PathResponse { data } => QuicFrame::PathResponse {
|
||||
data: Some(hex(data)),
|
||||
},
|
||||
Frame::ConnectionClose {
|
||||
error_code,
|
||||
frame_type,
|
||||
reason_phrase,
|
||||
} => QuicFrame::ConnectionClose {
|
||||
error_space: match error_code {
|
||||
CloseError::Transport(_) => Some(ErrorSpace::TransportError),
|
||||
CloseError::Application(_) => Some(ErrorSpace::ApplicationError),
|
||||
},
|
||||
error_code: Some(error_code.code()),
|
||||
error_code_value: Some(0),
|
||||
reason: Some(String::from_utf8_lossy(reason_phrase).to_string()),
|
||||
trigger_frame_type: Some(*frame_type),
|
||||
},
|
||||
Frame::HandshakeDone => QuicFrame::HandshakeDone,
|
||||
Frame::AckFrequency { .. } => QuicFrame::Unknown {
|
||||
frame_type_value: None,
|
||||
raw_frame_type: frame.get_type(),
|
||||
raw: None,
|
||||
},
|
||||
Frame::Datagram { data, .. } => QuicFrame::Datagram {
|
||||
length: data.len() as u64,
|
||||
raw: None,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn to_qlog_pkt_type(ptype: PacketType) -> qlog::events::quic::PacketType {
|
||||
match ptype {
|
||||
PacketType::Initial => qlog::events::quic::PacketType::Initial,
|
||||
PacketType::Handshake => qlog::events::quic::PacketType::Handshake,
|
||||
PacketType::ZeroRtt => qlog::events::quic::PacketType::ZeroRtt,
|
||||
PacketType::Short => qlog::events::quic::PacketType::OneRtt,
|
||||
PacketType::Retry => qlog::events::quic::PacketType::Retry,
|
||||
PacketType::VersionNegotiation => qlog::events::quic::PacketType::VersionNegotiation,
|
||||
PacketType::OtherVersion => qlog::events::quic::PacketType::Unknown,
|
||||
impl From<PacketType> for qlog::events::quic::PacketType {
|
||||
fn from(value: PacketType) -> Self {
|
||||
match value {
|
||||
PacketType::Initial => qlog::events::quic::PacketType::Initial,
|
||||
PacketType::Handshake => qlog::events::quic::PacketType::Handshake,
|
||||
PacketType::ZeroRtt => qlog::events::quic::PacketType::ZeroRtt,
|
||||
PacketType::Short => qlog::events::quic::PacketType::OneRtt,
|
||||
PacketType::Retry => qlog::events::quic::PacketType::Retry,
|
||||
PacketType::VersionNegotiation => qlog::events::quic::PacketType::VersionNegotiation,
|
||||
PacketType::OtherVersion => qlog::events::quic::PacketType::Unknown,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
third_party/rust/neqo-transport/src/server.rs (vendored, 89 changed lines)

@ -15,12 +15,12 @@ use std::{
|
|||
ops::{Deref, DerefMut},
|
||||
path::PathBuf,
|
||||
rc::{Rc, Weak},
|
||||
time::{Duration, Instant},
|
||||
time::Instant,
|
||||
};
|
||||
|
||||
use neqo_common::{
|
||||
self as common, event::Provider, hex, qdebug, qerror, qinfo, qlog::NeqoQlog, qtrace, qwarn,
|
||||
timer::Timer, Datagram, Decoder, Role,
|
||||
Datagram, Decoder, Role,
|
||||
};
|
||||
use neqo_crypto::{
|
||||
encode_ech_config, AntiReplay, Cipher, PrivateKey, PublicKey, ZeroRttCheckResult,
|
||||
|
|
@ -46,13 +46,6 @@ pub enum InitialResult {
|
|||
/// `MIN_INITIAL_PACKET_SIZE` is the smallest packet that can be used to establish
|
||||
/// a new connection across all QUIC versions this server supports.
|
||||
const MIN_INITIAL_PACKET_SIZE: usize = 1200;
|
||||
/// The size of timer buckets. This is higher than the actual timer granularity
|
||||
/// as this depends on there being some distribution of events.
|
||||
const TIMER_GRANULARITY: Duration = Duration::from_millis(4);
|
||||
/// The number of buckets in the timer. As mentioned in the definition of `Timer`,
|
||||
/// the granularity and capacity need to multiply to be larger than the largest
|
||||
/// delay that might be used. That's the idle timeout (currently 30s).
|
||||
const TIMER_CAPACITY: usize = 16384;
|
||||
|
||||
type StateRef = Rc<RefCell<ServerConnectionState>>;
|
||||
type ConnectionTableRef = Rc<RefCell<HashMap<ConnectionId, StateRef>>>;
|
||||
|
|
@ -61,7 +54,21 @@ type ConnectionTableRef = Rc<RefCell<HashMap<ConnectionId, StateRef>>>;
|
|||
pub struct ServerConnectionState {
|
||||
c: Connection,
|
||||
active_attempt: Option<AttemptKey>,
|
||||
last_timer: Instant,
|
||||
wake_at: Option<Instant>,
|
||||
}
|
||||
|
||||
impl ServerConnectionState {
|
||||
fn set_wake_at(&mut self, at: Instant) {
|
||||
self.wake_at = Some(at);
|
||||
}
|
||||
|
||||
fn needs_waking(&self, now: Instant) -> bool {
|
||||
self.wake_at.map_or(false, |t| t <= now)
|
||||
}
|
||||
|
||||
fn woken(&mut self) {
|
||||
self.wake_at = None;
|
||||
}
|
||||
}
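A minimal sketch of how these three helpers replace the old timer wheel; the surrounding `Server` code drives them, and nothing below is new API beyond what the diff already shows.

// Within server.rs: a connection records a deadline when Connection::process
// returns Output::Callback, and is serviced once that deadline has passed.
fn poll_connection(state: &mut ServerConnectionState, now: Instant) {
    if state.needs_waking(now) {
        // ... run Connection::process for this connection here ...
        state.woken(); // clear the stored wake-up time
    }
}
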
|
||||
|
||||
impl Deref for ServerConnectionState {
|
||||
|
|
@ -174,8 +181,8 @@ pub struct Server {
|
|||
active: HashSet<ActiveConnectionRef>,
|
||||
/// The set of connections that need immediate processing.
|
||||
waiting: VecDeque<StateRef>,
|
||||
/// Outstanding timers for connections.
|
||||
timers: Timer<StateRef>,
|
||||
/// The latest [`Output::Callback`] returned from [`Server::process`].
|
||||
wake_at: Option<Instant>,
|
||||
/// Address validation logic, which determines whether we send a Retry.
|
||||
address_validation: Rc<RefCell<AddressValidation>>,
|
||||
/// Directory to create qlog traces in
|
||||
|
|
@ -219,10 +226,10 @@ impl Server {
|
|||
connections: Rc::default(),
|
||||
active: HashSet::default(),
|
||||
waiting: VecDeque::default(),
|
||||
timers: Timer::new(now, TIMER_GRANULARITY, TIMER_CAPACITY),
|
||||
address_validation: Rc::new(RefCell::new(validation)),
|
||||
qlog_dir: None,
|
||||
ech_config: None,
|
||||
wake_at: None,
|
||||
})
|
||||
}
|
||||
|
||||
|
|
@ -260,11 +267,6 @@ impl Server {
|
|||
self.ech_config.as_ref().map_or(&[], |cfg| &cfg.encoded)
|
||||
}
|
||||
|
||||
fn remove_timer(&mut self, c: &StateRef) {
|
||||
let last = c.borrow().last_timer;
|
||||
self.timers.remove(last, |t| Rc::ptr_eq(t, c));
|
||||
}
|
||||
|
||||
fn process_connection(
|
||||
&mut self,
|
||||
c: &StateRef,
|
||||
|
|
@ -280,16 +282,12 @@ impl Server {
|
|||
}
|
||||
Output::Callback(delay) => {
|
||||
let next = now + delay;
|
||||
if next != c.borrow().last_timer {
|
||||
qtrace!([self], "Change timer to {:?}", next);
|
||||
self.remove_timer(c);
|
||||
c.borrow_mut().last_timer = next;
|
||||
self.timers.add(next, Rc::clone(c));
|
||||
c.borrow_mut().set_wake_at(next);
|
||||
if self.wake_at.map_or(true, |c| c > next) {
|
||||
self.wake_at = Some(next);
|
||||
}
|
||||
}
|
||||
Output::None => {
|
||||
self.remove_timer(c);
|
||||
}
|
||||
Output::None => {}
|
||||
}
|
||||
if c.borrow().has_events() {
|
||||
qtrace!([self], "Connection active: {:?}", c);
|
||||
|
|
@ -507,7 +505,7 @@ impl Server {
|
|||
self.setup_connection(&mut c, &attempt_key, initial, orig_dcid);
|
||||
let c = Rc::new(RefCell::new(ServerConnectionState {
|
||||
c,
|
||||
last_timer: now,
|
||||
wake_at: None,
|
||||
active_attempt: Some(attempt_key.clone()),
|
||||
}));
|
||||
cid_mgr.borrow_mut().set_connection(&c);
|
||||
|
|
@ -646,24 +644,28 @@ impl Server {
|
|||
return Some(d);
|
||||
}
|
||||
}
|
||||
qtrace!([self], "No packet to send still, run timers");
|
||||
while let Some(c) = self.timers.take_next(now) {
|
||||
if let Some(d) = self.process_connection(&c, None, now) {
|
||||
return Some(d);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn next_time(&mut self, now: Instant) -> Option<Duration> {
|
||||
if self.waiting.is_empty() {
|
||||
self.timers.next_time().map(|x| x - now)
|
||||
} else {
|
||||
Some(Duration::new(0, 0))
|
||||
qtrace!([self], "No packet to send still, check wake up times");
|
||||
loop {
|
||||
let connection = self
|
||||
.connections
|
||||
.borrow()
|
||||
.values()
|
||||
.find(|c| c.borrow().needs_waking(now))
|
||||
.cloned()?;
|
||||
let datagram = self.process_connection(&connection, None, now);
|
||||
connection.borrow_mut().woken();
|
||||
if datagram.is_some() {
|
||||
return datagram;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output {
|
||||
if self.wake_at.map_or(false, |c| c <= now) {
|
||||
self.wake_at = None;
|
||||
}
|
||||
|
||||
dgram
|
||||
.and_then(|d| self.process_input(d, now))
|
||||
.or_else(|| self.process_next_output(now))
|
||||
|
|
@ -671,12 +673,7 @@ impl Server {
|
|||
qtrace!([self], "Send packet: {:?}", d);
|
||||
Output::Datagram(d)
|
||||
})
|
||||
.or_else(|| {
|
||||
self.next_time(now).map(|delay| {
|
||||
qtrace!([self], "Wait: {:?}", delay);
|
||||
Output::Callback(delay)
|
||||
})
|
||||
})
|
||||
.or_else(|| self.wake_at.take().map(|c| Output::Callback(c - now)))
|
||||
.unwrap_or_else(|| {
|
||||
qtrace!([self], "Go dormant");
|
||||
Output::None
|
||||
|
|
|
third_party/rust/neqo-transport/src/stats.rs (vendored, 6 changed lines)

@ -14,7 +14,7 @@ use std::{
time::Duration,
};

use neqo_common::qinfo;
use neqo_common::qwarn;

use crate::packet::PacketNumber;

@ -168,7 +168,7 @@ impl Stats {

pub fn pkt_dropped(&mut self, reason: impl AsRef<str>) {
self.dropped_rx += 1;
qinfo!(
qwarn!(
[self.info],
"Dropped received packet: {}; Total: {}",
reason.as_ref(),

@ -206,7 +206,7 @@ impl Debug for Stats {
" tx: {} lost {} lateack {} ptoack {}",
self.packets_tx, self.lost, self.late_ack, self.pto_ack
)?;
writeln!(f, " resumed: {} ", self.resumed)?;
writeln!(f, " resumed: {}", self.resumed)?;
writeln!(f, " frames rx:")?;
self.frame_rx.fmt(f)?;
writeln!(f, " frames tx:")?;

@ -146,14 +146,7 @@ pub fn initial_aead_and_hp(dcid: &[u8], role: Role) -> (Aead, HpKey) {
)
.unwrap();
(
Aead::new(
false,
TLS_VERSION_1_3,
TLS_AES_128_GCM_SHA256,
&secret,
"quic ",
)
.unwrap(),
Aead::new(TLS_VERSION_1_3, TLS_AES_128_GCM_SHA256, &secret, "quic ").unwrap(),
HpKey::extract(TLS_VERSION_1_3, TLS_AES_128_GCM_SHA256, &secret, "quic hp").unwrap(),
)
}

@ -6,7 +6,7 @@

// Tests with the test vectors from the spec.

#![cfg(not(feature = "fuzzing"))]
#![cfg(not(feature = "disable-encryption"))]

use std::{cell::RefCell, rc::Rc};

@ -127,6 +127,76 @@ fn reorder_server_initial() {
|
|||
assert_eq!(*client.state(), State::Confirmed);
|
||||
}
|
||||
|
||||
fn set_payload(server_packet: &Option<Datagram>, client_dcid: &[u8], payload: &[u8]) -> Datagram {
|
||||
let (server_initial, _server_hs) = split_datagram(server_packet.as_ref().unwrap());
|
||||
let (protected_header, _, _, orig_payload) =
|
||||
decode_initial_header(&server_initial, Role::Server);
|
||||
|
||||
// Now decrypt the packet.
|
||||
let (aead, hp) = initial_aead_and_hp(client_dcid, Role::Server);
|
||||
let (mut header, pn) = remove_header_protection(&hp, protected_header, orig_payload);
|
||||
assert_eq!(pn, 0);
|
||||
// Re-encode the packet number as four bytes, so we have enough material for the header
|
||||
// protection sample if payload is empty.
|
||||
let pn_pos = header.len() - 2;
|
||||
header[pn_pos] = u8::try_from(4 + aead.expansion()).unwrap();
|
||||
header.resize(header.len() + 3, 0);
|
||||
header[0] |= 0b0000_0011; // Set the packet number length to 4.
|
||||
|
||||
// And build a packet containing the given payload.
|
||||
let mut packet = header.clone();
|
||||
packet.resize(header.len() + payload.len() + aead.expansion(), 0);
|
||||
aead.encrypt(pn, &header, payload, &mut packet[header.len()..])
|
||||
.unwrap();
|
||||
apply_header_protection(&hp, &mut packet, protected_header.len()..header.len());
|
||||
Datagram::new(
|
||||
server_initial.source(),
|
||||
server_initial.destination(),
|
||||
server_initial.tos(),
|
||||
server_initial.ttl(),
|
||||
packet,
|
||||
)
|
||||
}
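For orientation, the first half of `set_payload` can be read as a standalone step; this sketch mirrors the calls above (signatures assumed from those call sites): it strips header protection from a captured server Initial so that its plaintext payload can be replaced.

// Decode the Initial header, derive the server's Initial keys from the
// client's DCID, and remove header protection, as set_payload does above.
fn unprotect_server_initial(
    server_initial: &Datagram,
    client_dcid: &[u8],
) -> (Vec<u8>, u64) {
    let (protected_header, _, _, payload) =
        decode_initial_header(server_initial, Role::Server);
    let (_aead, hp) = initial_aead_and_hp(client_dcid, Role::Server);
    remove_header_protection(&hp, protected_header, payload)
}
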
|
||||
|
||||
/// Test that the stack treats a packet without any frames as a protocol violation.
|
||||
#[test]
|
||||
fn packet_without_frames() {
|
||||
let mut client = new_client(
|
||||
ConnectionParameters::default().versions(Version::Version1, vec![Version::Version1]),
|
||||
);
|
||||
let mut server = default_server();
|
||||
|
||||
let client_initial = client.process_output(now());
|
||||
let (_, client_dcid, _, _) =
|
||||
decode_initial_header(client_initial.as_dgram_ref().unwrap(), Role::Client);
|
||||
|
||||
let server_packet = server.process(client_initial.as_dgram_ref(), now()).dgram();
|
||||
let modified = set_payload(&server_packet, client_dcid, &[]);
|
||||
client.process_input(&modified, now());
|
||||
assert_eq!(
|
||||
client.state(),
|
||||
&State::Closed(ConnectionError::Transport(Error::ProtocolViolation))
|
||||
);
|
||||
}
|
||||
|
||||
/// Test that the stack permits a packet containing only padding.
|
||||
#[test]
|
||||
fn packet_with_only_padding() {
|
||||
let mut client = new_client(
|
||||
ConnectionParameters::default().versions(Version::Version1, vec![Version::Version1]),
|
||||
);
|
||||
let mut server = default_server();
|
||||
|
||||
let client_initial = client.process_output(now());
|
||||
let (_, client_dcid, _, _) =
|
||||
decode_initial_header(client_initial.as_dgram_ref().unwrap(), Role::Client);
|
||||
|
||||
let server_packet = server.process(client_initial.as_dgram_ref(), now()).dgram();
|
||||
let modified = set_payload(&server_packet, client_dcid, &[0]);
|
||||
client.process_input(&modified, now());
|
||||
assert_eq!(client.state(), &State::WaitInitial);
|
||||
}
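The payloads driving the two tests above, spelled out with illustrative names: an Initial whose plaintext contains no frames at all is a protocol violation, while a single 0x00 byte is one valid PADDING frame, which the new decoder reports as `Frame::Padding(1)`.

// Plaintext payloads handed to set_payload() in the tests above.
const NO_FRAMES: &[u8] = &[]; // decrypts, but contains no frames: ProtocolViolation
const ONE_PADDING_BYTE: &[u8] = &[0x00]; // a single PADDING frame: accepted
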
|
||||
|
||||
/// Overflow the crypto buffer.
|
||||
#[allow(clippy::similar_names)] // For ..._scid and ..._dcid, which are fine.
|
||||
#[test]
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![cfg(not(feature = "fuzzing"))]
#![cfg(not(feature = "disable-encryption"))]

mod common;